Compare commits


8 Commits

Author SHA1 Message Date
dependabot[bot]
25bf6da516 ci: [DEPENDABOT] bump tj-actions/changed-files from 46.0.5 to 47.0.4
Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 46.0.5 to 47.0.4.
- [Release notes](https://github.com/tj-actions/changed-files/releases)
- [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md)
- [Commits](ed68ef82c0...7dee1b0c15)

---
updated-dependencies:
- dependency-name: tj-actions/changed-files
  dependency-version: 47.0.4
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-02-20 09:19:50 +00:00
Ayaz Salikhov
d03d72bfd5 ci: Add dependabot config (#6379) 2026-02-20 09:19:00 +00:00
Ed Hennis
6f35d94b2f Fix tautological assertion (#6393) 2026-02-20 01:58:47 +00:00
Ayaz Salikhov
2c1fad1023 chore: Apply clang-format width 100 (#6387) 2026-02-19 23:30:00 +00:00
Ayaz Salikhov
25cca46553 chore: Set clang-format width to 100 in config file (#6387) 2026-02-19 23:29:46 +00:00
Ayaz Salikhov
469ce9f291 chore: Set cmake-format width to 100 (#6386) 2026-02-19 19:42:51 +00:00
Sergey Kuznetsov
31302877ab ci: Add clang tidy workflow to ci (#6369) 2026-02-19 14:06:44 -05:00
Jingchen
0976b2b68b refactor: Modularize app/tx (#6228) 2026-02-17 18:10:07 +00:00
898 changed files with 27312 additions and 13247 deletions


@@ -37,7 +37,7 @@ BinPackParameters: false
BreakBeforeBinaryOperators: false
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: true
ColumnLimit: 120
ColumnLimit: 100
CommentPragmas: "^ IWYU pragma:"
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 4
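
A minimal sketch of applying the narrower limit locally, assuming clang-format is installed and picks up the repository's .clang-format automatically; the extension list mirrors the C++ globs used elsewhere in this change and is illustrative only:

# Reformat all tracked C++ sources in place with the repo's .clang-format settings
git ls-files '*.cpp' '*.h' '*.ipp' | xargs clang-format -i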

.clang-tidy (new file, 191 lines added)

@@ -0,0 +1,191 @@
---
Checks: "-*,
bugprone-argument-comment
"
# bugprone-assert-side-effect,
# bugprone-bad-signal-to-kill-thread,
# bugprone-bool-pointer-implicit-conversion,
# bugprone-casting-through-void,
# bugprone-chained-comparison,
# bugprone-compare-pointer-to-member-virtual-function,
# bugprone-copy-constructor-init,
# bugprone-crtp-constructor-accessibility,
# bugprone-dangling-handle,
# bugprone-dynamic-static-initializers,
# bugprone-empty-catch,
# bugprone-fold-init-type,
# bugprone-forward-declaration-namespace,
# bugprone-inaccurate-erase,
# bugprone-inc-dec-in-conditions,
# bugprone-incorrect-enable-if,
# bugprone-incorrect-roundings,
# bugprone-infinite-loop,
# bugprone-integer-division,
# bugprone-lambda-function-name,
# bugprone-macro-parentheses,
# bugprone-macro-repeated-side-effects,
# bugprone-misplaced-operator-in-strlen-in-alloc,
# bugprone-misplaced-pointer-arithmetic-in-alloc,
# bugprone-misplaced-widening-cast,
# bugprone-move-forwarding-reference,
# bugprone-multi-level-implicit-pointer-conversion,
# bugprone-multiple-new-in-one-expression,
# bugprone-multiple-statement-macro,
# bugprone-no-escape,
# bugprone-non-zero-enum-to-bool-conversion,
# bugprone-optional-value-conversion,
# bugprone-parent-virtual-call,
# bugprone-pointer-arithmetic-on-polymorphic-object,
# bugprone-posix-return,
# bugprone-redundant-branch-condition,
# bugprone-reserved-identifier,
# bugprone-return-const-ref-from-parameter,
# bugprone-shared-ptr-array-mismatch,
# bugprone-signal-handler,
# bugprone-signed-char-misuse,
# bugprone-sizeof-container,
# bugprone-sizeof-expression,
# bugprone-spuriously-wake-up-functions,
# bugprone-standalone-empty,
# bugprone-string-constructor,
# bugprone-string-integer-assignment,
# bugprone-string-literal-with-embedded-nul,
# bugprone-stringview-nullptr,
# bugprone-suspicious-enum-usage,
# bugprone-suspicious-include,
# bugprone-suspicious-memory-comparison,
# bugprone-suspicious-memset-usage,
# bugprone-suspicious-missing-comma,
# bugprone-suspicious-realloc-usage,
# bugprone-suspicious-semicolon,
# bugprone-suspicious-string-compare,
# bugprone-suspicious-stringview-data-usage,
# bugprone-swapped-arguments,
# bugprone-switch-missing-default-case,
# bugprone-terminating-continue,
# bugprone-throw-keyword-missing,
# bugprone-too-small-loop-variable,
# bugprone-undefined-memory-manipulation,
# bugprone-undelegated-constructor,
# bugprone-unhandled-exception-at-new,
# bugprone-unhandled-self-assignment,
# bugprone-unique-ptr-array-mismatch,
# bugprone-unsafe-functions,
# bugprone-unused-local-non-trivial-variable,
# bugprone-unused-raii,
# bugprone-unused-return-value,
# bugprone-use-after-move,
# bugprone-virtual-near-miss,
# cppcoreguidelines-init-variables,
# cppcoreguidelines-misleading-capture-default-by-value,
# cppcoreguidelines-no-suspend-with-lock,
# cppcoreguidelines-pro-type-member-init,
# cppcoreguidelines-pro-type-static-cast-downcast,
# cppcoreguidelines-rvalue-reference-param-not-moved,
# cppcoreguidelines-use-default-member-init,
# cppcoreguidelines-virtual-class-destructor,
# hicpp-ignored-remove-result,
# llvm-namespace-comment,
# misc-const-correctness,
# misc-definitions-in-headers,
# misc-header-include-cycle,
# misc-include-cleaner,
# misc-misplaced-const,
# misc-redundant-expression,
# misc-static-assert,
# misc-throw-by-value-catch-by-reference,
# misc-unused-alias-decls,
# misc-unused-using-decls,
# modernize-concat-nested-namespaces,
# modernize-deprecated-headers,
# modernize-make-shared,
# modernize-make-unique,
# modernize-pass-by-value,
# modernize-type-traits,
# modernize-use-designated-initializers,
# modernize-use-emplace,
# modernize-use-equals-default,
# modernize-use-equals-delete,
# modernize-use-override,
# modernize-use-ranges,
# modernize-use-starts-ends-with,
# modernize-use-std-numbers,
# modernize-use-using,
# performance-faster-string-find,
# performance-for-range-copy,
# performance-implicit-conversion-in-loop,
# performance-inefficient-vector-operation,
# performance-move-const-arg,
# performance-move-constructor-init,
# performance-no-automatic-move,
# performance-trivially-destructible,
# readability-avoid-nested-conditional-operator,
# readability-avoid-return-with-void-value,
# readability-braces-around-statements,
# readability-const-return-type,
# readability-container-contains,
# readability-container-size-empty,
# readability-convert-member-functions-to-static,
# readability-duplicate-include,
# readability-else-after-return,
# readability-enum-initial-value,
# readability-implicit-bool-conversion,
# readability-inconsistent-declaration-parameter-name,
# readability-identifier-naming,
# readability-make-member-function-const,
# readability-math-missing-parentheses,
# readability-misleading-indentation,
# readability-non-const-parameter,
# readability-redundant-casting,
# readability-redundant-declaration,
# readability-redundant-inline-specifier,
# readability-redundant-member-init,
# readability-redundant-string-init,
# readability-reference-to-constructed-temporary,
# readability-simplify-boolean-expr,
# readability-static-accessed-through-instance,
# readability-static-definition-in-anonymous-namespace,
# readability-suspicious-call-argument,
# readability-use-std-min-max
#
# CheckOptions:
# readability-braces-around-statements.ShortStatementLines: 2
# readability-identifier-naming.MacroDefinitionCase: UPPER_CASE
# readability-identifier-naming.ClassCase: CamelCase
# readability-identifier-naming.StructCase: CamelCase
# readability-identifier-naming.UnionCase: CamelCase
# readability-identifier-naming.EnumCase: CamelCase
# readability-identifier-naming.EnumConstantCase: CamelCase
# readability-identifier-naming.ScopedEnumConstantCase: CamelCase
# readability-identifier-naming.GlobalConstantCase: UPPER_CASE
# readability-identifier-naming.GlobalConstantPrefix: "k"
# readability-identifier-naming.GlobalVariableCase: CamelCase
# readability-identifier-naming.GlobalVariablePrefix: "g"
# readability-identifier-naming.ConstexprFunctionCase: camelBack
# readability-identifier-naming.ConstexprMethodCase: camelBack
# readability-identifier-naming.ClassMethodCase: camelBack
# readability-identifier-naming.ClassMemberCase: camelBack
# readability-identifier-naming.ClassConstantCase: UPPER_CASE
# readability-identifier-naming.ClassConstantPrefix: "k"
# readability-identifier-naming.StaticConstantCase: UPPER_CASE
# readability-identifier-naming.StaticConstantPrefix: "k"
# readability-identifier-naming.StaticVariableCase: UPPER_CASE
# readability-identifier-naming.StaticVariablePrefix: "k"
# readability-identifier-naming.ConstexprVariableCase: UPPER_CASE
# readability-identifier-naming.ConstexprVariablePrefix: "k"
# readability-identifier-naming.LocalConstantCase: camelBack
# readability-identifier-naming.LocalVariableCase: camelBack
# readability-identifier-naming.TemplateParameterCase: CamelCase
# readability-identifier-naming.ParameterCase: camelBack
# readability-identifier-naming.FunctionCase: camelBack
# readability-identifier-naming.MemberCase: camelBack
# readability-identifier-naming.PrivateMemberSuffix: _
# readability-identifier-naming.ProtectedMemberSuffix: _
# readability-identifier-naming.PublicMemberSuffix: ""
# readability-identifier-naming.FunctionIgnoredRegexp: ".*tag_invoke.*"
# bugprone-unsafe-functions.ReportMoreUnsafeFunctions: true
# bugprone-unused-return-value.CheckedReturnTypes: ::std::error_code;::std::error_condition;::std::errc
# misc-include-cleaner.IgnoreHeaders: '.*/(detail|impl)/.*;.*(expected|unexpected).*;.*ranges_lower_bound\.h;time.h;stdlib.h;__chrono/.*;fmt/chrono.h;boost/uuid/uuid_hash.hpp'
#
# HeaderFilterRegex: '^.*/(src|tests)/.*\.(h|hpp)$'
WarningsAsErrors: "*"
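
For reference, a minimal sketch of running this configuration locally, assuming a build directory named build that already contains compile_commands.json (CMAKE_EXPORT_COMPILE_COMMANDS is turned on later in this change) and that LLVM's run-clang-tidy wrapper is on PATH. Because WarningsAsErrors is "*", any warning from an enabled check fails the run:

# Analyze the whole compilation database with the repository's .clang-tidy
run-clang-tidy -p build -j "$(nproc)"

# Or restrict the run to specific files, as the CI workflow below does for changed files
# (path/to/file.cpp is a placeholder)
run-clang-tidy -p build -j "$(nproc)" path/to/file.cpp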


@@ -29,7 +29,7 @@ format:
disable: false
_help_line_width:
- How wide to allow formatted cmake files
line_width: 120
line_width: 100
_help_tab_size:
- How many spaces to tab for indent
tab_size: 4
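
Similarly, a minimal sketch of reflowing the CMake files at the new width, assuming cmake-format is installed and discovers this config file on its own (the paths below are illustrative):

# Rewrite CMake files in place at the 100-column width
cmake-format -i CMakeLists.txt cmake/*.cmake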

.github/dependabot.yml (vendored, new file, 56 lines added)

@@ -0,0 +1,56 @@
version: 2
updates:
  - package-ecosystem: github-actions
    directory: /
    schedule:
      interval: weekly
      day: monday
      time: "04:00"
      timezone: Etc/GMT
    commit-message:
      prefix: "ci: [DEPENDABOT] "
    target-branch: develop

  - package-ecosystem: github-actions
    directory: .github/actions/build-deps/
    schedule:
      interval: weekly
      day: monday
      time: "04:00"
      timezone: Etc/GMT
    commit-message:
      prefix: "ci: [DEPENDABOT] "
    target-branch: develop

  - package-ecosystem: github-actions
    directory: .github/actions/generate-version/
    schedule:
      interval: weekly
      day: monday
      time: "04:00"
      timezone: Etc/GMT
    commit-message:
      prefix: "ci: [DEPENDABOT] "
    target-branch: develop

  - package-ecosystem: github-actions
    directory: .github/actions/print-env/
    schedule:
      interval: weekly
      day: monday
      time: "04:00"
      timezone: Etc/GMT
    commit-message:
      prefix: "ci: [DEPENDABOT] "
    target-branch: develop

  - package-ecosystem: github-actions
    directory: .github/actions/setup-conan/
    schedule:
      interval: weekly
      day: monday
      time: "04:00"
      timezone: Etc/GMT
    commit-message:
      prefix: "ci: [DEPENDABOT] "
    target-branch: develop


@@ -70,7 +70,7 @@ that `test` code should _never_ be included in `xrpl` or `xrpld` code.)
## Validation
The [levelization](generate.py) script takes no parameters,
The [levelization](generate.sh) script takes no parameters,
reads no environment variables, and can be run from any directory,
as long as it is in the expected location in the rippled repo.
It can be run at any time from within a checked out repo, and will
@@ -104,7 +104,7 @@ It generates many files of [results](results):
Github Actions workflow to test that levelization loops haven't
changed. Unfortunately, if changes are detected, it can't tell if
they are improvements or not, so if you have resolved any issues or
done anything else to improve levelization, run `generate.py`,
done anything else to improve levelization, run `levelization.sh`,
and commit the updated results.
The `loops.txt` and `ordering.txt` files relate the modules
@@ -128,7 +128,7 @@ The committed files hide the detailed values intentionally, to
prevent false alarms and merging issues, and because it's easy to
get those details locally.
1. Run `generate.py`
1. Run `levelization.sh`
2. Grep the modules in `paths.txt`.
- For example, if a cycle is found `A ~= B`, simply `grep -w
A .github/scripts/levelization/results/paths.txt | grep -w B`
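
Putting the two steps above together, a minimal sketch, assuming a checked-out repo and the script path added in this change (A and B stand in for whatever modules appear in the reported loop):

# Regenerate the levelization results
.github/scripts/levelization/generate.sh

# Inspect a reported cycle between modules A and B
grep -w A .github/scripts/levelization/results/paths.txt | grep -w B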


@@ -1,369 +0,0 @@
#!/usr/bin/env python3
"""
Usage: generate.py
This script takes no parameters, and can be run from any directory,
as long as it is in the expected location in the repo.
"""
import os
import re
import subprocess
import sys
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple, Set, Optional
# Compile regex patterns once at module level
INCLUDE_PATTERN = re.compile(r"^\s*#include.*/.*\.h")
INCLUDE_PATH_PATTERN = re.compile(r'[<"]([^>"]+)[>"]')
def dictionary_sort_key(s: str) -> str:
"""
Create a sort key that mimics 'sort -d' (dictionary order).
Dictionary order only considers blanks and alphanumeric characters.
This means punctuation like '.' is ignored during sorting.
"""
# Keep only alphanumeric characters and spaces
return "".join(c for c in s if c.isalnum() or c.isspace())
def get_level(file_path: str) -> str:
"""
Extract the level from a file path (second and third directory components).
Equivalent to bash: cut -d/ -f 2,3
Examples:
src/xrpld/app/main.cpp -> xrpld.app
src/libxrpl/protocol/STObject.cpp -> libxrpl.protocol
include/xrpl/basics/base_uint.h -> xrpl.basics
"""
parts = file_path.split("/")
# Get fields 2 and 3 (indices 1 and 2 in 0-based indexing)
if len(parts) >= 3:
level = f"{parts[1]}/{parts[2]}"
elif len(parts) >= 2:
level = f"{parts[1]}/toplevel"
else:
level = file_path
# If the "level" indicates a file, cut off the filename
if "." in level.split("/")[-1]: # Avoid Path object creation
# Use the "toplevel" label as a workaround for `sort`
# inconsistencies between different utility versions
level = level.rsplit("/", 1)[0] + "/toplevel"
return level.replace("/", ".")
def extract_include_level(include_line: str) -> Optional[str]:
"""
Extract the include path from an #include directive.
Gets the first two directory components from the include path.
Equivalent to bash: cut -d/ -f 1,2
Examples:
#include <xrpl/basics/base_uint.h> -> xrpl.basics
#include "xrpld/app/main/Application.h" -> xrpld.app
"""
# Remove everything before the quote or angle bracket
match = INCLUDE_PATH_PATTERN.search(include_line)
if not match:
return None
include_path = match.group(1)
parts = include_path.split("/")
# Get first two fields (indices 0 and 1)
if len(parts) >= 2:
include_level = f"{parts[0]}/{parts[1]}"
else:
include_level = include_path
# If the "includelevel" indicates a file, cut off the filename
if "." in include_level.split("/")[-1]: # Avoid Path object creation
include_level = include_level.rsplit("/", 1)[0] + "/toplevel"
return include_level.replace("/", ".")
def find_repo_root(start_path: Path, depth_limit: int = 10) -> Path:
"""
Find the repository root by looking for .git directory or src/include folders.
Walks up the directory tree from the start path.
"""
current = start_path.resolve()
# Walk up the directory tree
for _ in range(depth_limit): # Limit search depth to prevent infinite loops
# Check if this directory has src or include folders
has_src = (current / "src").exists()
has_include = (current / "include").exists()
if has_src or has_include:
return current
# Check if this is a git repository root
if (current / ".git").exists():
# Check if it has src or include nearby
if has_src or has_include:
return current
# Move up one level
parent = current.parent
if parent == current: # Reached filesystem root
break
current = parent
# If we couldn't find it, raise an error
raise RuntimeError(
"Could not find repository root. "
"Expected to find a directory containing 'src' and/or 'include' folders."
)
def get_scan_directories(repo_root: Path) -> List[Path]:
"""
Get the list of directories to scan for include files.
Returns paths that actually exist.
"""
directories = []
for dir_name in ["include", "src"]:
dir_path = repo_root / dir_name
if dir_path.exists() and dir_path.is_dir():
directories.append(dir_path)
if not directories:
raise RuntimeError(f"No 'src' or 'include' directories found in {repo_root}")
return directories
def main():
# Change to the script's directory
script_dir = Path(__file__).parent.resolve()
os.chdir(script_dir)
# If the shell is interactive, clean up any flotsam before analyzing
# Match bash behavior: check if PS1 is set (indicates interactive shell)
# When running a script, PS1 is not set even if stdin/stdout are TTYs
if os.environ.get("PS1"):
try:
subprocess.run(["git", "clean", "-ix"], check=False, timeout=30)
except (subprocess.TimeoutExpired, KeyboardInterrupt):
print("Skipping git clean...")
except Exception:
# If git clean fails for any reason, just continue
pass
# Clean up and create results directory
results_dir = script_dir / "results"
if results_dir.exists():
import shutil
shutil.rmtree(results_dir)
results_dir.mkdir()
# Find the repository root by searching for src and include directories
try:
repo_root = find_repo_root(script_dir)
scan_dirs = get_scan_directories(repo_root)
print(f"Found repository root: {repo_root}")
print(f"Scanning directories:")
for scan_dir in scan_dirs:
print(f" - {scan_dir.relative_to(repo_root)}")
except RuntimeError as e:
print(f"Error: {e}", file=sys.stderr)
sys.exit(1)
print("\nScanning for raw includes...")
# Find all #include directives
raw_includes: List[Tuple[str, str]] = []
rawincludes_file = results_dir / "rawincludes.txt"
# Write to file as we go to avoid storing everything in memory
with open(rawincludes_file, "w", buffering=8192) as raw_f:
for dir_path in scan_dirs:
print(f" Scanning {dir_path.relative_to(repo_root)}...")
for file_path in dir_path.rglob("*"):
if not file_path.is_file():
continue
try:
rel_path_str = str(file_path.relative_to(repo_root))
# Read file with larger buffer for better performance
with open(
file_path,
"r",
encoding="utf-8",
errors="ignore",
buffering=8192,
) as f:
for line in f:
# Quick check before regex
if "#include" not in line or "boost" in line:
continue
if INCLUDE_PATTERN.match(line):
line_stripped = line.strip()
entry = f"{rel_path_str}:{line_stripped}\n"
print(entry, end="")
raw_f.write(entry)
raw_includes.append((rel_path_str, line_stripped))
except Exception as e:
print(f"Error reading {file_path}: {e}", file=sys.stderr)
# Build levelization paths and count directly (no need to sort first)
print("Build levelization paths")
path_counts: Dict[Tuple[str, str], int] = defaultdict(int)
for file_path, include_line in raw_includes:
level = get_level(file_path)
include_level = extract_include_level(include_line)
if include_level and level != include_level:
path_counts[(level, include_level)] += 1
# Sort and deduplicate paths (using dictionary order like bash 'sort -d')
print("Sort and deduplicate paths")
paths_file = results_dir / "paths.txt"
with open(paths_file, "w") as f:
# Sort using dictionary order: only alphanumeric and spaces matter
sorted_items = sorted(
path_counts.items(),
key=lambda x: (dictionary_sort_key(x[0][0]), dictionary_sort_key(x[0][1])),
)
for (level, include_level), count in sorted_items:
line = f"{count:7} {level} {include_level}\n"
print(line.rstrip())
f.write(line)
# Split into flat-file database
print("Split into flat-file database")
includes_dir = results_dir / "includes"
included_by_dir = results_dir / "included_by"
includes_dir.mkdir()
included_by_dir.mkdir()
# Batch writes by grouping data first to avoid repeated file opens
includes_data: Dict[str, List[Tuple[str, int]]] = defaultdict(list)
included_by_data: Dict[str, List[Tuple[str, int]]] = defaultdict(list)
# Process in sorted order to match bash script behavior (dictionary order)
sorted_items = sorted(
path_counts.items(),
key=lambda x: (dictionary_sort_key(x[0][0]), dictionary_sort_key(x[0][1])),
)
for (level, include_level), count in sorted_items:
includes_data[level].append((include_level, count))
included_by_data[include_level].append((level, count))
# Write all includes files in sorted order (dictionary order)
for level in sorted(includes_data.keys(), key=dictionary_sort_key):
entries = includes_data[level]
with open(includes_dir / level, "w") as f:
for include_level, count in entries:
line = f"{include_level} {count}\n"
print(line.rstrip())
f.write(line)
# Write all included_by files in sorted order (dictionary order)
for include_level in sorted(included_by_data.keys(), key=dictionary_sort_key):
entries = included_by_data[include_level]
with open(included_by_dir / include_level, "w") as f:
for level, count in entries:
line = f"{level} {count}\n"
print(line.rstrip())
f.write(line)
# Search for loops
print("Search for loops")
loops_file = results_dir / "loops.txt"
ordering_file = results_dir / "ordering.txt"
loops_found: Set[Tuple[str, str]] = set()
# Pre-load all include files into memory to avoid repeated I/O
# This is the biggest optimization - we were reading files repeatedly in nested loops
# Use list of tuples to preserve file order
includes_cache: Dict[str, List[Tuple[str, int]]] = {}
includes_lookup: Dict[str, Dict[str, int]] = {} # For fast lookup
# Note: bash script uses 'for source in *' which uses standard glob sorting,
# NOT dictionary order. So we use standard sorted() here, not dictionary_sort_key.
for include_file in sorted(includes_dir.iterdir(), key=lambda p: p.name):
if not include_file.is_file():
continue
includes_cache[include_file.name] = []
includes_lookup[include_file.name] = {}
with open(include_file, "r") as f:
for line in f:
parts = line.strip().split()
if len(parts) >= 2:
include_name = parts[0]
include_count = int(parts[1])
includes_cache[include_file.name].append(
(include_name, include_count)
)
includes_lookup[include_file.name][include_name] = include_count
with open(loops_file, "w", buffering=8192) as loops_f, open(
ordering_file, "w", buffering=8192
) as ordering_f:
# Use standard sorting to match bash glob expansion 'for source in *'
for source in sorted(includes_cache.keys()):
source_includes = includes_cache[source]
for include, include_freq in source_includes:
# Check if include file exists and references source
if include not in includes_lookup:
continue
source_freq = includes_lookup[include].get(source)
if source_freq is not None:
# Found a loop
loop_key = tuple(sorted([source, include]))
if loop_key in loops_found:
continue
loops_found.add(loop_key)
loops_f.write(f"Loop: {source} {include}\n")
# If the counts are close, indicate that the two modules are
# on the same level, though they shouldn't be
diff = include_freq - source_freq
if diff > 3:
loops_f.write(f" {source} > {include}\n\n")
elif diff < -3:
loops_f.write(f" {include} > {source}\n\n")
elif source_freq == include_freq:
loops_f.write(f" {include} == {source}\n\n")
else:
loops_f.write(f" {include} ~= {source}\n\n")
else:
ordering_f.write(f"{source} > {include}\n")
# Print results
print("\nOrdering:")
with open(ordering_file, "r") as f:
print(f.read(), end="")
print("\nLoops:")
with open(loops_file, "r") as f:
print(f.read(), end="")
if __name__ == "__main__":
main()

.github/scripts/levelization/generate.sh (vendored, new executable file, 130 lines added)

@@ -0,0 +1,130 @@
#!/bin/bash
# Usage: generate.sh
# This script takes no parameters, reads no environment variables,
# and can be run from any directory, as long as it is in the expected
# location in the repo.
pushd $( dirname $0 )
if [ -v PS1 ]
then
# if the shell is interactive, clean up any flotsam before analyzing
git clean -ix
fi
# Ensure all sorting is ASCII-order consistently across platforms.
export LANG=C
rm -rfv results
mkdir results
includes="$( pwd )/results/rawincludes.txt"
pushd ../../..
echo Raw includes:
grep -r '^[ ]*#include.*/.*\.h' include src | \
grep -v boost | tee ${includes}
popd
pushd results
oldifs=${IFS}
IFS=:
mkdir includes
mkdir included_by
echo Build levelization paths
exec 3< ${includes} # open rawincludes.txt for input
while read -r -u 3 file include
do
level=$( echo ${file} | cut -d/ -f 2,3 )
# If the "level" indicates a file, cut off the filename
if [[ "${level##*.}" != "${level}" ]]
then
# Use the "toplevel" label as a workaround for `sort`
# inconsistencies between different utility versions
level="$( dirname ${level} )/toplevel"
fi
level=$( echo ${level} | tr '/' '.' )
includelevel=$( echo ${include} | sed 's/.*["<]//; s/[">].*//' | \
cut -d/ -f 1,2 )
if [[ "${includelevel##*.}" != "${includelevel}" ]]
then
# Use the "toplevel" label as a workaround for `sort`
# inconsistencies between different utility versions
includelevel="$( dirname ${includelevel} )/toplevel"
fi
includelevel=$( echo ${includelevel} | tr '/' '.' )
if [[ "$level" != "$includelevel" ]]
then
echo $level $includelevel | tee -a paths.txt
fi
done
echo Sort and deduplicate paths
sort -ds paths.txt | uniq -c | tee sortedpaths.txt
mv sortedpaths.txt paths.txt
exec 3>&- #close fd 3
IFS=${oldifs}
unset oldifs
echo Split into flat-file database
exec 4<paths.txt # open paths.txt for input
while read -r -u 4 count level include
do
echo ${include} ${count} | tee -a includes/${level}
echo ${level} ${count} | tee -a included_by/${include}
done
exec 4>&- #close fd 4
loops="$( pwd )/loops.txt"
ordering="$( pwd )/ordering.txt"
pushd includes
echo Search for loops
# Redirect stdout to a file
exec 4>&1
exec 1>"${loops}"
for source in *
do
if [[ -f "$source" ]]
then
exec 5<"${source}" # open for input
while read -r -u 5 include includefreq
do
if [[ -f $include ]]
then
if grep -q -w $source $include
then
if grep -q -w "Loop: $include $source" "${loops}"
then
continue
fi
sourcefreq=$( grep -w $source $include | cut -d\ -f2 )
echo "Loop: $source $include"
# If the counts are close, indicate that the two modules are
# on the same level, though they shouldn't be
if [[ $(( $includefreq - $sourcefreq )) -gt 3 ]]
then
echo -e " $source > $include\n"
elif [[ $(( $sourcefreq - $includefreq )) -gt 3 ]]
then
echo -e " $include > $source\n"
elif [[ $sourcefreq -eq $includefreq ]]
then
echo -e " $include == $source\n"
else
echo -e " $include ~= $source\n"
fi
else
echo "$source > $include" >> "${ordering}"
fi
fi
done
exec 5>&- #close fd 5
fi
done
exec 1>&4 #close fd 1
exec 4>&- #close fd 4
cat "${ordering}"
cat "${loops}"
popd
popd
popd


@@ -32,6 +32,14 @@ libxrpl.server > xrpl.server
libxrpl.shamap > xrpl.basics
libxrpl.shamap > xrpl.protocol
libxrpl.shamap > xrpl.shamap
libxrpl.tx > xrpl.basics
libxrpl.tx > xrpl.conditions
libxrpl.tx > xrpl.core
libxrpl.tx > xrpl.json
libxrpl.tx > xrpl.ledger
libxrpl.tx > xrpl.protocol
libxrpl.tx > xrpl.server
libxrpl.tx > xrpl.tx
test.app > test.jtx
test.app > test.rpc
test.app > test.toplevel
@@ -49,6 +57,7 @@ test.app > xrpl.protocol
test.app > xrpl.rdb
test.app > xrpl.resource
test.app > xrpl.server
test.app > xrpl.tx
test.basics > test.jtx
test.basics > test.unit_test
test.basics > xrpl.basics
@@ -67,6 +76,7 @@ test.consensus > xrpld.app
test.consensus > xrpld.consensus
test.consensus > xrpl.json
test.consensus > xrpl.ledger
test.consensus > xrpl.tx
test.core > test.jtx
test.core > test.toplevel
test.core > test.unit_test
@@ -93,6 +103,7 @@ test.jtx > xrpl.net
test.jtx > xrpl.protocol
test.jtx > xrpl.resource
test.jtx > xrpl.server
test.jtx > xrpl.tx
test.ledger > test.jtx
test.ledger > test.toplevel
test.ledger > xrpl.basics
@@ -138,9 +149,11 @@ test.rpc > xrpld.core
test.rpc > xrpld.overlay
test.rpc > xrpld.rpc
test.rpc > xrpl.json
test.rpc > xrpl.ledger
test.rpc > xrpl.protocol
test.rpc > xrpl.resource
test.rpc > xrpl.server
test.rpc > xrpl.tx
test.server > test.jtx
test.server > test.toplevel
test.server > test.unit_test
@@ -171,6 +184,7 @@ xrpl.json > xrpl.basics
xrpl.ledger > xrpl.basics
xrpl.ledger > xrpl.protocol
xrpl.ledger > xrpl.server
xrpl.ledger > xrpl.shamap
xrpl.net > xrpl.basics
xrpl.nodestore > xrpl.basics
xrpl.nodestore > xrpl.protocol
@@ -192,9 +206,12 @@ xrpl.server > xrpl.shamap
xrpl.shamap > xrpl.basics
xrpl.shamap > xrpl.nodestore
xrpl.shamap > xrpl.protocol
xrpl.tx > xrpl.basics
xrpl.tx > xrpl.core
xrpl.tx > xrpl.ledger
xrpl.tx > xrpl.protocol
xrpld.app > test.unit_test
xrpld.app > xrpl.basics
xrpld.app > xrpl.conditions
xrpld.app > xrpl.core
xrpld.app > xrpld.consensus
xrpld.app > xrpld.core
@@ -207,6 +224,7 @@ xrpld.app > xrpl.rdb
xrpld.app > xrpl.resource
xrpld.app > xrpl.server
xrpld.app > xrpl.shamap
xrpld.app > xrpl.tx
xrpld.consensus > xrpl.basics
xrpld.consensus > xrpl.json
xrpld.consensus > xrpl.protocol
@@ -225,6 +243,7 @@ xrpld.overlay > xrpl.protocol
xrpld.overlay > xrpl.rdb
xrpld.overlay > xrpl.resource
xrpld.overlay > xrpl.server
xrpld.overlay > xrpl.tx
xrpld.peerfinder > xrpl.basics
xrpld.peerfinder > xrpld.core
xrpld.peerfinder > xrpl.protocol
@@ -244,4 +263,5 @@ xrpld.rpc > xrpl.protocol
xrpld.rpc > xrpl.rdb
xrpld.rpc > xrpl.resource
xrpld.rpc > xrpl.server
xrpld.rpc > xrpl.tx
xrpld.shamap > xrpl.shamap


@@ -46,7 +46,7 @@ jobs:
# that Github considers any skipped jobs to have passed, and in
# turn the required checks as well.
id: changes
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
with:
files: |
# These paths are unique to `on-pr.yml`.
@@ -65,9 +65,12 @@ jobs:
.github/workflows/reusable-build.yml
.github/workflows/reusable-build-test-config.yml
.github/workflows/reusable-build-test.yml
.github/workflows/reusable-clang-tidy.yml
.github/workflows/reusable-clang-tidy-files.yml
.github/workflows/reusable-strategy-matrix.yml
.github/workflows/reusable-test.yml
.github/workflows/reusable-upload-recipe.yml
.clang-tidy
.codecov.yml
cmake/**
conan/**
@@ -107,6 +110,17 @@ jobs:
if: ${{ needs.should-run.outputs.go == 'true' }}
uses: ./.github/workflows/reusable-check-rename.yml
clang-tidy:
needs: should-run
if: ${{ needs.should-run.outputs.go == 'true' }}
uses: ./.github/workflows/reusable-clang-tidy.yml
permissions:
issues: write
contents: read
with:
check_only_changed: true
create_issue_on_failure: false
build-test:
needs: should-run
if: ${{ needs.should-run.outputs.go == 'true' }}
@@ -156,6 +170,7 @@ jobs:
needs:
- check-levelization
- check-rename
- clang-tidy
- build-test
- upload-recipe
- notify-clio


@@ -22,9 +22,12 @@ on:
- ".github/workflows/reusable-build.yml"
- ".github/workflows/reusable-build-test-config.yml"
- ".github/workflows/reusable-build-test.yml"
- ".github/workflows/reusable-clang-tidy.yml"
- ".github/workflows/reusable-clang-tidy-files.yml"
- ".github/workflows/reusable-strategy-matrix.yml"
- ".github/workflows/reusable-test.yml"
- ".github/workflows/reusable-upload-recipe.yml"
- ".clang-tidy"
- ".codecov.yml"
- "cmake/**"
- "conan/**"
@@ -60,6 +63,15 @@ defaults:
shell: bash
jobs:
clang-tidy:
uses: ./.github/workflows/reusable-clang-tidy.yml
permissions:
issues: write
contents: read
with:
check_only_changed: false
create_issue_on_failure: ${{ github.event_name == 'schedule' }}
build-test:
uses: ./.github/workflows/reusable-build-test.yml
strategy:


@@ -20,7 +20,7 @@ jobs:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Check levelization
run: python .github/scripts/levelization/generate.py
run: .github/scripts/levelization/generate.sh
- name: Check for differences
env:
MESSAGE: |
@@ -32,7 +32,7 @@ jobs:
removed from loops.txt, it's probably an improvement, while if
something was added, it's probably a regression.
Run '.github/scripts/levelization/generate.py' in your repo, commit
Run '.github/scripts/levelization/generate.sh' in your repo, commit
and push the changes. See .github/scripts/levelization/README.md for
more info.
run: |


@@ -0,0 +1,162 @@
name: Run clang-tidy on files
on:
workflow_call:
inputs:
files:
description: "List of files to check (empty means check all files)"
type: string
default: ""
create_issue_on_failure:
description: "Whether to create an issue if the check failed"
type: boolean
default: false
defaults:
run:
shell: bash
env:
# Conan installs the generators in the build/generators directory, see the
# layout() method in conanfile.py. We then run CMake from the build directory.
BUILD_DIR: build
BUILD_TYPE: Release
jobs:
run-clang-tidy:
name: Run clang tidy
runs-on: ["self-hosted", "Linux", "X64", "heavy"]
container: "ghcr.io/xrplf/ci/debian-trixie:clang-21-sha-53033a2"
permissions:
issues: write
contents: read
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
- name: Prepare runner
uses: XRPLF/actions/prepare-runner@2cbf481018d930656e9276fcc20dc0e3a0be5b6d
with:
enable_ccache: false
- name: Print build environment
uses: ./.github/actions/print-env
- name: Get number of processors
uses: XRPLF/actions/get-nproc@cf0433aa74563aead044a1e395610c96d65a37cf
id: nproc
- name: Setup Conan
uses: ./.github/actions/setup-conan
- name: Build dependencies
uses: ./.github/actions/build-deps
with:
build_nproc: ${{ steps.nproc.outputs.nproc }}
build_type: ${{ env.BUILD_TYPE }}
log_verbosity: verbose
- name: Configure CMake
working-directory: ${{ env.BUILD_DIR }}
run: |
cmake \
-G 'Ninja' \
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
-DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \
-Dtests=ON \
-Dwerr=ON \
-Dxrpld=ON \
..
# clang-tidy needs headers generated from proto files
- name: Build libxrpl.libpb
working-directory: ${{ env.BUILD_DIR }}
run: |
ninja -j ${{ steps.nproc.outputs.nproc }} xrpl.libpb
- name: Run clang tidy
id: run_clang_tidy
continue-on-error: true
env:
FILES: ${{ inputs.files }}
run: |
run-clang-tidy -j ${{ steps.nproc.outputs.nproc }} -p "$BUILD_DIR" $FILES 2>&1 | tee clang-tidy-output.txt
- name: Upload clang-tidy output
if: steps.run_clang_tidy.outcome != 'success'
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: clang-tidy-results
path: clang-tidy-output.txt
retention-days: 30
- name: Create an issue
if: steps.run_clang_tidy.outcome != 'success' && inputs.create_issue_on_failure
id: create_issue
shell: bash
env:
GH_TOKEN: ${{ github.token }}
run: |
# Prepare issue body with clang-tidy output
cat > issue.md <<EOF
## Clang-tidy Check Failed
**Workflow:** ${{ github.workflow }}
**Run ID:** ${{ github.run_id }}
**Commit:** ${{ github.sha }}
**Branch/Ref:** ${{ github.ref }}
**Triggered by:** ${{ github.actor }}
### Clang-tidy Output:
\`\`\`
EOF
# Append clang-tidy output (filter for errors and warnings)
if [ -f clang-tidy-output.txt ]; then
# Extract lines containing 'error:', 'warning:', or 'note:'
grep -E '(error:|warning:|note:)' clang-tidy-output.txt > filtered-output.txt || true
# If filtered output is empty, use original (might be a different error format)
if [ ! -s filtered-output.txt ]; then
cp clang-tidy-output.txt filtered-output.txt
fi
# Truncate if too large
head -c 60000 filtered-output.txt >> issue.md
if [ "$(wc -c < filtered-output.txt)" -gt 60000 ]; then
echo "" >> issue.md
echo "... (output truncated, see artifacts for full output)" >> issue.md
fi
rm filtered-output.txt
else
echo "No output file found" >> issue.md
fi
cat >> issue.md <<EOF
\`\`\`
**Workflow run:** ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
---
*This issue was automatically created by the clang-tidy workflow.*
EOF
# Create the issue
gh issue create \
--label "Bug,Clang-tidy" \
--title "Clang-tidy check failed" \
--body-file ./issue.md \
> create_issue.log
created_issue="$(sed 's|.*/||' create_issue.log)"
echo "created_issue=$created_issue" >> $GITHUB_OUTPUT
echo "Created issue #$created_issue"
rm -f create_issue.log issue.md clang-tidy-output.txt
- name: Fail the workflow if clang-tidy failed
if: steps.run_clang_tidy.outcome != 'success'
run: |
echo "Clang-tidy check failed!"
exit 1


@@ -0,0 +1,47 @@
name: Clang-tidy check

on:
  workflow_call:
    inputs:
      check_only_changed:
        description: "Check only changed files in PR. If false, checks all files in the repository."
        type: boolean
        default: false
      create_issue_on_failure:
        description: "Whether to create an issue if the check failed"
        type: boolean
        default: false

defaults:
  run:
    shell: bash

jobs:
  determine-files:
    name: Determine files to check
    if: ${{ inputs.check_only_changed }}
    runs-on: ubuntu-latest
    outputs:
      any_changed: ${{ steps.changed_files.outputs.any_changed }}
      all_changed_files: ${{ steps.changed_files.outputs.all_changed_files }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
      - name: Get changed C++ files
        id: changed_files
        uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
        with:
          files: |
            **/*.cpp
            **/*.h
            **/*.ipp
          separator: " "

  run-clang-tidy:
    needs: [determine-files]
    if: ${{ always() && !cancelled() && (!inputs.check_only_changed || needs.determine-files.outputs.any_changed == 'true') }}
    uses: ./.github/workflows/reusable-clang-tidy-files.yml
    with:
      files: ${{ inputs.check_only_changed && needs.determine-files.outputs.all_changed_files || '' }}
      create_issue_on_failure: ${{ inputs.create_issue_on_failure }}

.gitignore (vendored, 3 lines changed)

@@ -72,8 +72,5 @@ DerivedData
/.claude
/CLAUDE.md
# Python
__pycache__
# clangd cache
/.cache


@@ -17,6 +17,7 @@ project(xrpl)
set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
include(CompilationEnv)
@@ -38,16 +39,16 @@ include(Ccache)
# make GIT_COMMIT_HASH define available to all sources
find_package(Git)
if (Git_FOUND)
execute_process(COMMAND ${GIT_EXECUTABLE} --git-dir=${CMAKE_CURRENT_SOURCE_DIR}/.git rev-parse HEAD
OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE gch)
execute_process(COMMAND ${GIT_EXECUTABLE} --git-dir=${CMAKE_CURRENT_SOURCE_DIR}/.git rev-parse
HEAD OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE gch)
if (gch)
set(GIT_COMMIT_HASH "${gch}")
message(STATUS gch: ${GIT_COMMIT_HASH})
add_definitions(-DGIT_COMMIT_HASH="${GIT_COMMIT_HASH}")
endif ()
execute_process(COMMAND ${GIT_EXECUTABLE} --git-dir=${CMAKE_CURRENT_SOURCE_DIR}/.git rev-parse --abbrev-ref HEAD
OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE gb)
execute_process(COMMAND ${GIT_EXECUTABLE} --git-dir=${CMAKE_CURRENT_SOURCE_DIR}/.git rev-parse
--abbrev-ref HEAD OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE gb)
if (gb)
set(GIT_BRANCH "${gb}")
message(STATUS gb: ${GIT_BRANCH})
@@ -67,7 +68,8 @@ include(FetchContent)
include(ExternalProject)
include(CMakeFuncs) # must come *after* ExternalProject b/c it overrides one function in EP
if (target)
message(FATAL_ERROR "The target option has been removed - use native cmake options to control build")
message(FATAL_ERROR "The target option has been removed - use native cmake options to control build"
)
endif ()
include(XrplSanity)
@@ -76,7 +78,8 @@ include(XrplSettings)
# this check has to remain in the top-level cmake because of the early return statement
if (packages_only)
if (NOT TARGET rpm)
message(FATAL_ERROR "packages_only requested, but targets were not created - is docker installed?")
message(FATAL_ERROR "packages_only requested, but targets were not created - is docker installed?"
)
endif ()
return()
endif ()
@@ -118,7 +121,8 @@ target_link_libraries(
option(rocksdb "Enable RocksDB" ON)
if (rocksdb)
find_package(RocksDB REQUIRED)
set_target_properties(RocksDB::rocksdb PROPERTIES INTERFACE_COMPILE_DEFINITIONS XRPL_ROCKSDB_AVAILABLE=1)
set_target_properties(RocksDB::rocksdb PROPERTIES INTERFACE_COMPILE_DEFINITIONS
XRPL_ROCKSDB_AVAILABLE=1)
target_link_libraries(xrpl_libs INTERFACE RocksDB::rocksdb)
endif ()


@@ -43,7 +43,8 @@ set(CMAKE_VS_GLOBALS "CLToolExe=cl.exe" "CLToolPath=${CMAKE_BINARY_DIR}" "TrackF
# By default Visual Studio generators will use /Zi to capture debug information, which is not compatible with ccache, so
# tell it to use /Z7 instead.
if (MSVC)
foreach (var_ CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE)
foreach (var_ CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE CMAKE_CXX_FLAGS_DEBUG
CMAKE_CXX_FLAGS_RELEASE)
string(REPLACE "/Zi" "/Z7" ${var_} "${${var_}}")
endforeach ()
endif ()


@@ -180,7 +180,8 @@ elseif (DEFINED ENV{CODE_COVERAGE_GCOV_TOOL})
set(GCOV_TOOL "$ENV{CODE_COVERAGE_GCOV_TOOL}")
elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
if (APPLE)
execute_process(COMMAND xcrun -f llvm-cov OUTPUT_VARIABLE LLVMCOV_PATH OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process(COMMAND xcrun -f llvm-cov OUTPUT_VARIABLE LLVMCOV_PATH
OUTPUT_STRIP_TRAILING_WHITESPACE)
else ()
find_program(LLVMCOV_PATH llvm-cov)
endif ()
@@ -199,8 +200,8 @@ foreach (LANG ${LANGUAGES})
if ("${CMAKE_${LANG}_COMPILER_VERSION}" VERSION_LESS 3)
message(FATAL_ERROR "Clang version must be 3.0.0 or greater! Aborting...")
endif ()
elseif (NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "GNU" AND NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES
"(LLVM)?[Ff]lang")
elseif (NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "GNU" AND NOT "${CMAKE_${LANG}_COMPILER_ID}"
MATCHES "(LLVM)?[Ff]lang")
message(FATAL_ERROR "Compiler is not GNU or Flang! Aborting...")
endif ()
endforeach ()
@@ -321,14 +322,16 @@ function (setup_target_for_coverage_gcovr)
endif ()
if ("--output" IN_LIST GCOVR_ADDITIONAL_ARGS)
message(FATAL_ERROR "Unsupported --output option detected in GCOVR_ADDITIONAL_ARGS! Aborting...")
message(FATAL_ERROR "Unsupported --output option detected in GCOVR_ADDITIONAL_ARGS! Aborting..."
)
else ()
if ((Coverage_FORMAT STREQUAL "html-details") OR (Coverage_FORMAT STREQUAL "html-nested"))
set(GCOVR_OUTPUT_FILE ${PROJECT_BINARY_DIR}/${Coverage_NAME}/index.html)
set(GCOVR_CREATE_FOLDER ${PROJECT_BINARY_DIR}/${Coverage_NAME})
elseif (Coverage_FORMAT STREQUAL "html-single")
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.html)
elseif ((Coverage_FORMAT STREQUAL "json-summary") OR (Coverage_FORMAT STREQUAL "json-details")
elseif ((Coverage_FORMAT STREQUAL "json-summary") OR (Coverage_FORMAT STREQUAL
"json-details")
OR (Coverage_FORMAT STREQUAL "coveralls"))
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.json)
elseif (Coverage_FORMAT STREQUAL "txt")
@@ -452,8 +455,10 @@ function (setup_target_for_coverage_gcovr)
COMMENT "Running gcovr to produce code coverage report.")
# Show info where to find the report
add_custom_command(TARGET ${Coverage_NAME} POST_BUILD COMMAND echo
COMMENT "Code coverage report saved in ${GCOVR_OUTPUT_FILE} formatted as ${Coverage_FORMAT}")
add_custom_command(
TARGET ${Coverage_NAME} POST_BUILD COMMAND echo
COMMENT "Code coverage report saved in ${GCOVR_OUTPUT_FILE} formatted as ${Coverage_FORMAT}"
)
endfunction () # setup_target_for_coverage_gcovr
function (add_code_coverage_to_target name scope)
@@ -463,8 +468,9 @@ function (add_code_coverage_to_target name scope)
separate_arguments(COVERAGE_C_LINKER_FLAGS NATIVE_COMMAND "${COVERAGE_C_LINKER_FLAGS}")
# Add compiler options to the target
target_compile_options(${name} ${scope} $<$<COMPILE_LANGUAGE:CXX>:${COVERAGE_CXX_COMPILER_FLAGS}>
$<$<COMPILE_LANGUAGE:C>:${COVERAGE_C_COMPILER_FLAGS}>)
target_compile_options(
${name} ${scope} $<$<COMPILE_LANGUAGE:CXX>:${COVERAGE_CXX_COMPILER_FLAGS}>
$<$<COMPILE_LANGUAGE:C>:${COVERAGE_C_COMPILER_FLAGS}>)
target_link_libraries(${name} ${scope} $<$<LINK_LANGUAGE:CXX>:${COVERAGE_CXX_LINKER_FLAGS}>
$<$<LINK_LANGUAGE:C>:${COVERAGE_C_LINKER_FLAGS}>)


@@ -17,7 +17,8 @@ link_libraries(Xrpl::common)
if (NOT DEFINED CMAKE_POSITION_INDEPENDENT_CODE)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
endif ()
set_target_properties(common PROPERTIES INTERFACE_POSITION_INDEPENDENT_CODE ${CMAKE_POSITION_INDEPENDENT_CODE})
set_target_properties(common PROPERTIES INTERFACE_POSITION_INDEPENDENT_CODE
${CMAKE_POSITION_INDEPENDENT_CODE})
set(CMAKE_CXX_EXTENSIONS OFF)
target_compile_definitions(
common
@@ -37,7 +38,8 @@ if (MSVC)
# remove existing exception flag since we set it to -EHa
string(REGEX REPLACE "[-/]EH[a-z]+" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
foreach (var_ CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE)
foreach (var_ CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE CMAKE_CXX_FLAGS_DEBUG
CMAKE_CXX_FLAGS_RELEASE)
# also remove dynamic runtime
string(REGEX REPLACE "[-/]MD[d]*" " " ${var_} "${${var_}}")
@@ -143,20 +145,23 @@ if (voidstar)
elseif (NOT is_linux)
message(FATAL_ERROR "Antithesis instrumentation requires Linux, aborting...")
elseif (NOT (is_clang AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 16.0))
message(FATAL_ERROR "Antithesis instrumentation requires Clang version 16 or later, aborting...")
message(FATAL_ERROR "Antithesis instrumentation requires Clang version 16 or later, aborting..."
)
endif ()
endif ()
if (use_mold)
# use mold linker if available
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -fuse-ld=mold -Wl,--version ERROR_QUIET OUTPUT_VARIABLE LD_VERSION)
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -fuse-ld=mold -Wl,--version ERROR_QUIET
OUTPUT_VARIABLE LD_VERSION)
if ("${LD_VERSION}" MATCHES "mold")
target_link_libraries(common INTERFACE -fuse-ld=mold)
endif ()
unset(LD_VERSION)
elseif (use_gold AND is_gcc)
# use gold linker if available
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -fuse-ld=gold -Wl,--version ERROR_QUIET OUTPUT_VARIABLE LD_VERSION)
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -fuse-ld=gold -Wl,--version ERROR_QUIET
OUTPUT_VARIABLE LD_VERSION)
#[=========================================================[
NOTE: THE gold linker inserts -rpath as DT_RUNPATH by
default instead of DT_RPATH, so you might have slightly
@@ -186,7 +191,8 @@ elseif (use_gold AND is_gcc)
unset(LD_VERSION)
elseif (use_lld)
# use lld linker if available
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -fuse-ld=lld -Wl,--version ERROR_QUIET OUTPUT_VARIABLE LD_VERSION)
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -fuse-ld=lld -Wl,--version ERROR_QUIET
OUTPUT_VARIABLE LD_VERSION)
if ("${LD_VERSION}" MATCHES "LLD")
target_link_libraries(common INTERFACE -fuse-ld=lld)
endif ()


@@ -14,7 +14,8 @@ target_protobuf_sources(xrpl.libpb xrpl/proto LANGUAGE cpp IMPORT_DIRS include/x
PROTOS include/xrpl/proto/xrpl.proto)
file(GLOB_RECURSE protos "include/xrpl/proto/org/*.proto")
target_protobuf_sources(xrpl.libpb xrpl/proto LANGUAGE cpp IMPORT_DIRS include/xrpl/proto PROTOS "${protos}")
target_protobuf_sources(xrpl.libpb xrpl/proto LANGUAGE cpp IMPORT_DIRS include/xrpl/proto
PROTOS "${protos}")
target_protobuf_sources(
xrpl.libpb xrpl/proto
LANGUAGE grpc
@@ -24,8 +25,9 @@ target_protobuf_sources(
GENERATE_EXTENSIONS .grpc.pb.h .grpc.pb.cc)
target_compile_options(
xrpl.libpb PUBLIC $<$<BOOL:${is_msvc}>:-wd4996> $<$<BOOL:${is_xcode}>: --system-header-prefix="google/protobuf"
-Wno-deprecated-dynamic-exception-spec >
xrpl.libpb
PUBLIC $<$<BOOL:${is_msvc}>:-wd4996> $<$<BOOL:${is_xcode}>:
--system-header-prefix="google/protobuf" -Wno-deprecated-dynamic-exception-spec >
PRIVATE $<$<BOOL:${is_msvc}>:-wd4065> $<$<NOT:$<BOOL:${is_msvc}>>:-Wno-deprecated-declarations>)
target_link_libraries(xrpl.libpb PUBLIC protobuf::libprotobuf gRPC::grpc++)
@@ -73,7 +75,8 @@ target_link_libraries(xrpl.libxrpl.protocol PUBLIC xrpl.libxrpl.crypto xrpl.libx
# Level 05
add_module(xrpl core)
target_link_libraries(xrpl.libxrpl.core PUBLIC xrpl.libxrpl.basics xrpl.libxrpl.json xrpl.libxrpl.protocol)
target_link_libraries(xrpl.libxrpl.core PUBLIC xrpl.libxrpl.basics xrpl.libxrpl.json
xrpl.libxrpl.protocol)
# Level 06
add_module(xrpl resource)
@@ -81,22 +84,23 @@ target_link_libraries(xrpl.libxrpl.resource PUBLIC xrpl.libxrpl.protocol)
# Level 07
add_module(xrpl net)
target_link_libraries(xrpl.libxrpl.net PUBLIC xrpl.libxrpl.basics xrpl.libxrpl.json xrpl.libxrpl.protocol
xrpl.libxrpl.resource)
target_link_libraries(xrpl.libxrpl.net PUBLIC xrpl.libxrpl.basics xrpl.libxrpl.json
xrpl.libxrpl.protocol xrpl.libxrpl.resource)
add_module(xrpl nodestore)
target_link_libraries(xrpl.libxrpl.nodestore PUBLIC xrpl.libxrpl.basics xrpl.libxrpl.json xrpl.libxrpl.protocol)
target_link_libraries(xrpl.libxrpl.nodestore PUBLIC xrpl.libxrpl.basics xrpl.libxrpl.json
xrpl.libxrpl.protocol)
add_module(xrpl shamap)
target_link_libraries(xrpl.libxrpl.shamap PUBLIC xrpl.libxrpl.basics xrpl.libxrpl.crypto xrpl.libxrpl.protocol
xrpl.libxrpl.nodestore)
target_link_libraries(xrpl.libxrpl.shamap PUBLIC xrpl.libxrpl.basics xrpl.libxrpl.crypto
xrpl.libxrpl.protocol xrpl.libxrpl.nodestore)
add_module(xrpl rdb)
target_link_libraries(xrpl.libxrpl.rdb PUBLIC xrpl.libxrpl.basics xrpl.libxrpl.core)
add_module(xrpl server)
target_link_libraries(xrpl.libxrpl.server PUBLIC xrpl.libxrpl.protocol xrpl.libxrpl.core xrpl.libxrpl.rdb
xrpl.libxrpl.resource)
target_link_libraries(xrpl.libxrpl.server PUBLIC xrpl.libxrpl.protocol xrpl.libxrpl.core
xrpl.libxrpl.rdb xrpl.libxrpl.resource)
add_module(xrpl conditions)
target_link_libraries(xrpl.libxrpl.conditions PUBLIC xrpl.libxrpl.server)
@@ -109,8 +113,12 @@ target_link_libraries(
xrpl.libxrpl.protocol
xrpl.libxrpl.rdb
xrpl.libxrpl.server
xrpl.libxrpl.shamap
xrpl.libxrpl.conditions)
add_module(xrpl tx)
target_link_libraries(xrpl.libxrpl.tx PUBLIC xrpl.libxrpl.ledger)
add_library(xrpl.libxrpl)
set_target_properties(xrpl.libxrpl PROPERTIES OUTPUT_NAME xrpl)
@@ -135,7 +143,8 @@ target_link_modules(
rdb
resource
server
shamap)
shamap
tx)
# All headers in libxrpl are in modules.
# Uncomment this stanza if you have not yet moved new headers into a module.


@@ -65,8 +65,8 @@ add_custom_command(
OUTPUT "${doxygen_index_file}"
COMMAND "${CMAKE_COMMAND}" -E env "DOXYGEN_OUTPUT_DIRECTORY=${doxygen_output_directory}"
"DOXYGEN_INCLUDE_PATH=${doxygen_include_path}" "DOXYGEN_TAGFILES=${doxygen_tagfiles}"
"DOXYGEN_PLANTUML_JAR_PATH=${doxygen_plantuml_jar_path}" "DOXYGEN_DOT_PATH=${doxygen_dot_path}"
"${DOXYGEN_EXECUTABLE}" "${doxyfile}"
"DOXYGEN_PLANTUML_JAR_PATH=${doxygen_plantuml_jar_path}"
"DOXYGEN_DOT_PATH=${doxygen_dot_path}" "${DOXYGEN_EXECUTABLE}" "${doxyfile}"
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
DEPENDS "${dependencies}" "${tagfile}")
add_custom_target(docs DEPENDS "${doxygen_index_file}" SOURCES "${dependencies}")


@@ -32,6 +32,7 @@ install(TARGETS common
xrpl.libxrpl.resource
xrpl.libxrpl.server
xrpl.libxrpl.shamap
xrpl.libxrpl.tx
antithesis-sdk-cpp
EXPORT XrplExports
LIBRARY DESTINATION lib
@@ -40,11 +41,13 @@ install(TARGETS common
INCLUDES
DESTINATION include)
install(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/include/xrpl" DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}")
install(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/include/xrpl"
DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}")
install(EXPORT XrplExports FILE XrplTargets.cmake NAMESPACE Xrpl:: DESTINATION lib/cmake/xrpl)
include(CMakePackageConfigHelpers)
write_basic_package_version_file(XrplConfigVersion.cmake VERSION ${xrpld_version} COMPATIBILITY SameMajorVersion)
write_basic_package_version_file(XrplConfigVersion.cmake VERSION ${xrpld_version}
COMPATIBILITY SameMajorVersion)
if (is_root_project AND TARGET xrpld)
install(TARGETS xrpld RUNTIME DESTINATION bin)
@@ -71,5 +74,5 @@ if (is_root_project AND TARGET xrpld)
")
endif ()
install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/cmake/XrplConfig.cmake ${CMAKE_CURRENT_BINARY_DIR}/XrplConfigVersion.cmake
DESTINATION lib/cmake/xrpl)
install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/cmake/XrplConfig.cmake
${CMAKE_CURRENT_BINARY_DIR}/XrplConfigVersion.cmake DESTINATION lib/cmake/xrpl)


@@ -33,10 +33,13 @@ target_compile_definitions(
target_compile_options(
opts
INTERFACE $<$<AND:$<BOOL:${is_gcc}>,$<COMPILE_LANGUAGE:CXX>>:-Wsuggest-override>
$<$<BOOL:${is_gcc}>:-Wno-maybe-uninitialized> $<$<BOOL:${perf}>:-fno-omit-frame-pointer>
$<$<BOOL:${profile}>:-pg> $<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${profile}>>:-p>)
$<$<BOOL:${is_gcc}>:-Wno-maybe-uninitialized>
$<$<BOOL:${perf}>:-fno-omit-frame-pointer>
$<$<BOOL:${profile}>:-pg>
$<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${profile}>>:-p>)
target_link_libraries(opts INTERFACE $<$<BOOL:${profile}>:-pg> $<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${profile}>>:-p>)
target_link_libraries(opts INTERFACE $<$<BOOL:${profile}>:-pg>
$<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${profile}>>:-p>)
if (jemalloc)
find_package(jemalloc REQUIRED)


@@ -19,7 +19,8 @@ if (NOT is_multiconfig)
endif ()
if (is_clang) # both Clang and AppleClang
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 16.0)
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS
16.0)
message(FATAL_ERROR "This project requires clang 16 or later")
endif ()
elseif (is_gcc)
@@ -32,7 +33,8 @@ endif ()
if ("${CMAKE_CURRENT_SOURCE_DIR}" STREQUAL "${CMAKE_BINARY_DIR}")
message(FATAL_ERROR "Builds (in-source) are not allowed in "
"${CMAKE_CURRENT_SOURCE_DIR}. Please remove CMakeCache.txt and the CMakeFiles "
"directory from ${CMAKE_CURRENT_SOURCE_DIR} and try building in a separate directory.")
"directory from ${CMAKE_CURRENT_SOURCE_DIR} and try building in a separate directory."
)
endif ()
if (MSVC AND CMAKE_GENERATOR_PLATFORM STREQUAL "Win32")


@@ -70,7 +70,8 @@ if (is_linux AND NOT SANITIZER)
else ()
set(TRUNCATED_LOGS_DEFAULT OFF)
endif ()
option(TRUNCATED_THREAD_NAME_LOGS "Show warnings about truncated thread names on Linux." ${TRUNCATED_LOGS_DEFAULT})
option(TRUNCATED_THREAD_NAME_LOGS "Show warnings about truncated thread names on Linux."
${TRUNCATED_LOGS_DEFAULT})
if (TRUNCATED_THREAD_NAME_LOGS)
add_compile_definitions(TRUNCATED_THREAD_NAME_LOGS)
endif ()
@@ -92,11 +93,13 @@ endif ()
option(jemalloc "Enables jemalloc for heap profiling" OFF)
option(werr "treat warnings as errors" OFF)
option(local_protobuf "Force a local build of protobuf instead of looking for an installed version." OFF)
option(local_protobuf
"Force a local build of protobuf instead of looking for an installed version." OFF)
option(local_grpc "Force a local build of gRPC instead of looking for an installed version." OFF)
# the remaining options are obscure and rarely used
option(beast_no_unit_test_inline "Prevents unit test definitions from being inserted into global table" OFF)
option(beast_no_unit_test_inline
"Prevents unit test definitions from being inserted into global table" OFF)
option(single_io_service_thread "Restricts the number of threads calling io_context::run to one. \
This can be useful when debugging." OFF)
option(boost_show_deprecated "Allow boost to fail on deprecated usage. Only useful if you're trying\


@@ -1,4 +1,6 @@
option(validator_keys "Enables building of validator-keys tool as a separate target (imported via FetchContent)" OFF)
option(validator_keys
"Enables building of validator-keys tool as a separate target (imported via FetchContent)"
OFF)
if (validator_keys)
git_branch(current_branch)
@@ -8,8 +10,9 @@ if (validator_keys)
endif ()
message(STATUS "Tracking ValidatorKeys branch: ${current_branch}")
FetchContent_Declare(validator_keys GIT_REPOSITORY https://github.com/ripple/validator-keys-tool.git
GIT_TAG "${current_branch}")
FetchContent_Declare(
validator_keys GIT_REPOSITORY https://github.com/ripple/validator-keys-tool.git
GIT_TAG "${current_branch}")
FetchContent_MakeAvailable(validator_keys)
set_target_properties(validator-keys PROPERTIES RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}")
install(TARGETS validator-keys RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})


@@ -15,11 +15,12 @@ include(isolate_headers)
function (add_module parent name)
set(target ${PROJECT_NAME}.lib${parent}.${name})
add_library(${target} OBJECT)
file(GLOB_RECURSE sources CONFIGURE_DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/src/lib${parent}/${name}/*.cpp")
file(GLOB_RECURSE sources CONFIGURE_DEPENDS
"${CMAKE_CURRENT_SOURCE_DIR}/src/lib${parent}/${name}/*.cpp")
target_sources(${target} PRIVATE ${sources})
target_include_directories(${target} PUBLIC "$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>")
isolate_headers(${target} "${CMAKE_CURRENT_SOURCE_DIR}/include"
"${CMAKE_CURRENT_SOURCE_DIR}/include/${parent}/${name}" PUBLIC)
isolate_headers(${target} "${CMAKE_CURRENT_SOURCE_DIR}/src" "${CMAKE_CURRENT_SOURCE_DIR}/src/lib${parent}/${name}"
PRIVATE)
isolate_headers(${target} "${CMAKE_CURRENT_SOURCE_DIR}/src"
"${CMAKE_CURRENT_SOURCE_DIR}/src/lib${parent}/${name}" PRIVATE)
endfunction ()


@@ -39,6 +39,7 @@ if (SANITIZERS_ENABLED AND is_clang)
endif ()
message(STATUS "Adding [${Boost_INCLUDE_DIRS}] to sanitizer blacklist")
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt "src:${Boost_INCLUDE_DIRS}/*")
target_compile_options(opts INTERFACE # ignore boost headers for sanitizing
-fsanitize-blacklist=${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt)
target_compile_options(
opts INTERFACE # ignore boost headers for sanitizing
-fsanitize-blacklist=${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt)
endif ()


@@ -6,6 +6,7 @@ ignorePaths:
- docs/**/*.puml
- cmake/**
- LICENSE.md
- .clang-tidy
language: en
allowCompoundWords: true # TODO (#6334)
ignoreRandomStrings: true


@@ -84,7 +84,8 @@ public:
if (lines_.empty())
return "";
if (lines_.size() > 1)
Throw<std::runtime_error>("A legacy value must have exactly one line. Section: " + name_);
Throw<std::runtime_error>(
"A legacy value must have exactly one line. Section: " + name_);
return lines_[0];
}
@@ -268,7 +269,8 @@ public:
bool
had_trailing_comments() const
{
return std::any_of(map_.cbegin(), map_.cend(), [](auto s) { return s.second.had_trailing_comments(); });
return std::any_of(
map_.cbegin(), map_.cend(), [](auto s) { return s.second.had_trailing_comments(); });
}
protected:
View File
@@ -35,7 +35,10 @@ lz4Compress(void const* in, std::size_t inSize, BufferFactory&& bf)
auto compressed = bf(outCapacity);
auto compressedSize = LZ4_compress_default(
reinterpret_cast<char const*>(in), reinterpret_cast<char*>(compressed), inSize, outCapacity);
reinterpret_cast<char const*>(in),
reinterpret_cast<char*>(compressed),
inSize,
outCapacity);
if (compressedSize == 0)
Throw<std::runtime_error>("lz4 compress: failed");
@@ -66,8 +69,10 @@ lz4Decompress(
Throw<std::runtime_error>("lz4Decompress: integer overflow (output)");
if (LZ4_decompress_safe(
reinterpret_cast<char const*>(in), reinterpret_cast<char*>(decompressed), inSize, decompressedSize) !=
decompressedSize)
reinterpret_cast<char const*>(in),
reinterpret_cast<char*>(decompressed),
inSize,
decompressedSize) != decompressedSize)
Throw<std::runtime_error>("lz4Decompress: failed");
return decompressedSize;
@@ -83,7 +88,11 @@ lz4Decompress(
*/
template <typename InputStream>
std::size_t
lz4Decompress(InputStream& in, std::size_t inSize, std::uint8_t* decompressed, std::size_t decompressedSize)
lz4Decompress(
InputStream& in,
std::size_t inSize,
std::uint8_t* decompressed,
std::size_t decompressedSize)
{
std::vector<std::uint8_t> compressed;
std::uint8_t const* chunk = nullptr;
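For orientation, the templates above are thin wrappers over the plain LZ4 C API. Below is a minimal standalone round-trip sketch of that API; the roundTrip name and the buffer handling are illustrative only and not part of this change.

#include <lz4.h>

#include <stdexcept>
#include <string>
#include <vector>

// Compress a string and decompress it again, mirroring the checks in the wrappers above.
std::vector<char>
roundTrip(std::string const& input)
{
    int const inSize = static_cast<int>(input.size());
    std::vector<char> compressed(LZ4_compressBound(inSize));
    int const compressedSize = LZ4_compress_default(
        input.data(), compressed.data(), inSize, static_cast<int>(compressed.size()));
    if (compressedSize <= 0)
        throw std::runtime_error("lz4 compress: failed");

    std::vector<char> decompressed(input.size());
    if (LZ4_decompress_safe(
            compressed.data(), decompressed.data(), compressedSize, inSize) != inSize)
        throw std::runtime_error("lz4 decompress: failed");
    return decompressed;
}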
View File
@@ -55,7 +55,8 @@ private:
if (m_value != value_type())
{
std::size_t elapsed = std::chrono::duration_cast<std::chrono::seconds>(now - m_when).count();
std::size_t elapsed =
std::chrono::duration_cast<std::chrono::seconds>(now - m_when).count();
// A span larger than four times the window decays the
// value to an insignificant amount so just reset it.
View File
@@ -120,7 +120,8 @@ public:
template <typename U>
requires std::convertible_to<U, E> && (!std::is_reference_v<U>)
constexpr Expected(Unexpected<U> e) : Base(boost::outcome_v2::in_place_type_t<E>{}, std::move(e.value()))
constexpr Expected(Unexpected<U> e)
: Base(boost::outcome_v2::in_place_type_t<E>{}, std::move(e.value()))
{
}
@@ -191,7 +192,8 @@ public:
// Specialization of Expected<void, E>. Allows returning either success
// (without a value) or the reason for the failure.
template <class E>
class [[nodiscard]] Expected<void, E> : private boost::outcome_v2::result<void, E, detail::throw_policy>
class [[nodiscard]]
Expected<void, E> : private boost::outcome_v2::result<void, E, detail::throw_policy>
{
using Base = boost::outcome_v2::result<void, E, detail::throw_policy>;
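As a conceptual aid only: the Expected/Unexpected pair above is the usual value-or-error return type, here built on boost::outcome. The sketch below shows the same pattern with std::variant; it is not this header's implementation, and parsePort is a made-up example.

#include <string>
#include <utility>
#include <variant>

template <class E>
struct UnexpectedSketch
{
    E value;
};

template <class T, class E>
class ExpectedSketch
{
    std::variant<T, E> v_;  // index 0 holds the value, index 1 holds the error

public:
    ExpectedSketch(T t) : v_(std::in_place_index<0>, std::move(t)) {}
    ExpectedSketch(UnexpectedSketch<E> e) : v_(std::in_place_index<1>, std::move(e.value)) {}

    explicit operator bool() const { return v_.index() == 0; }
    T const& operator*() const { return std::get<0>(v_); }
    E const& error() const { return std::get<1>(v_); }
};

ExpectedSketch<int, std::string>
parsePort(std::string const& s)
{
    if (s.empty())
        return UnexpectedSketch<std::string>{"empty input"};
    return std::stoi(s);
}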
View File
@@ -14,6 +14,9 @@ getFileContents(
std::optional<std::size_t> maxSize = std::nullopt);
void
writeFileContents(boost::system::error_code& ec, boost::filesystem::path const& destPath, std::string const& contents);
writeFileContents(
boost::system::error_code& ec,
boost::filesystem::path const& destPath,
std::string const& contents);
} // namespace xrpl
View File
@@ -44,8 +44,8 @@ struct SharedIntrusiveAdoptNoIncrementTag
//
template <class T>
concept CAdoptTag =
std::is_same_v<T, SharedIntrusiveAdoptIncrementStrongTag> || std::is_same_v<T, SharedIntrusiveAdoptNoIncrementTag>;
concept CAdoptTag = std::is_same_v<T, SharedIntrusiveAdoptIncrementStrongTag> ||
std::is_same_v<T, SharedIntrusiveAdoptNoIncrementTag>;
//------------------------------------------------------------------------------
@@ -443,7 +443,8 @@ make_SharedIntrusive(Args&&... args)
auto p = new TT(std::forward<Args>(args)...);
static_assert(
noexcept(SharedIntrusive<TT>(std::declval<TT*>(), std::declval<SharedIntrusiveAdoptNoIncrementTag>())),
noexcept(SharedIntrusive<TT>(
std::declval<TT*>(), std::declval<SharedIntrusiveAdoptNoIncrementTag>())),
"SharedIntrusive constructor should not throw or this can leak "
"memory");
View File
@@ -184,7 +184,8 @@ private:
/** Mask that will zero out everything except the weak count.
*/
static constexpr FieldType weakMask = (((one << WeakCountNumBits) - 1) << StrongCountNumBits) & valueMask;
static constexpr FieldType weakMask =
(((one << WeakCountNumBits) - 1) << StrongCountNumBits) & valueMask;
/** Unpack the count and tag fields from the packed atomic integer form. */
struct RefCountPair
@@ -209,8 +210,10 @@ private:
FieldType
combinedValue() const noexcept;
static constexpr CountType maxStrongValue = static_cast<CountType>((one << StrongCountNumBits) - 1);
static constexpr CountType maxWeakValue = static_cast<CountType>((one << WeakCountNumBits) - 1);
static constexpr CountType maxStrongValue =
static_cast<CountType>((one << StrongCountNumBits) - 1);
static constexpr CountType maxWeakValue =
static_cast<CountType>((one << WeakCountNumBits) - 1);
/** Put an extra margin to detect when running up against limits.
This is only used in debug code, and is useful if we reduce the
number of bits in the strong and weak counts (to 16 and 14 bits).
@@ -395,7 +398,8 @@ inline IntrusiveRefCounts::~IntrusiveRefCounts() noexcept
{
#ifndef NDEBUG
auto v = refCounts.load(std::memory_order_acquire);
XRPL_ASSERT((!(v & valueMask)), "xrpl::IntrusiveRefCounts::~IntrusiveRefCounts : count must be zero");
XRPL_ASSERT(
(!(v & valueMask)), "xrpl::IntrusiveRefCounts::~IntrusiveRefCounts : count must be zero");
auto t = v & tagMask;
XRPL_ASSERT((!t || t == tagMask), "xrpl::IntrusiveRefCounts::~IntrusiveRefCounts : valid tag");
#endif
@@ -433,8 +437,10 @@ IntrusiveRefCounts::RefCountPair::combinedValue() const noexcept
(strong < checkStrongMaxValue && weak < checkWeakMaxValue),
"xrpl::IntrusiveRefCounts::RefCountPair::combinedValue : inputs "
"inside range");
return (static_cast<IntrusiveRefCounts::FieldType>(weak) << IntrusiveRefCounts::StrongCountNumBits) |
static_cast<IntrusiveRefCounts::FieldType>(strong) | partialDestroyStartedBit | partialDestroyFinishedBit;
return (static_cast<IntrusiveRefCounts::FieldType>(weak)
<< IntrusiveRefCounts::StrongCountNumBits) |
static_cast<IntrusiveRefCounts::FieldType>(strong) | partialDestroyStartedBit |
partialDestroyFinishedBit;
}
template <class T>
@@ -442,7 +448,8 @@ inline void
partialDestructorFinished(T** o)
{
T& self = **o;
IntrusiveRefCounts::RefCountPair p = self.refCounts.fetch_or(IntrusiveRefCounts::partialDestroyFinishedMask);
IntrusiveRefCounts::RefCountPair p =
self.refCounts.fetch_or(IntrusiveRefCounts::partialDestroyFinishedMask);
XRPL_ASSERT(
(!p.partialDestroyFinishedBit && p.partialDestroyStartedBit && !p.strong),
"xrpl::partialDestructorFinished : not a weak ref");
View File
@@ -103,6 +103,7 @@ LocalValue<T>::operator*()
}
return *reinterpret_cast<T*>(
lvs->values.emplace(this, std::make_unique<detail::LocalValues::Value<T>>(t_)).first->second->get());
lvs->values.emplace(this, std::make_unique<detail::LocalValues::Value<T>>(t_))
.first->second->get());
}
} // namespace xrpl
View File
@@ -170,7 +170,11 @@ public:
partition_severities() const;
void
write(beast::severities::Severity level, std::string const& partition, std::string const& text, bool console);
write(
beast::severities::Severity level,
std::string const& partition,
std::string const& text,
bool console);
std::string
rotate();
View File
@@ -240,7 +240,11 @@ public:
Number(rep mantissa);
explicit Number(rep mantissa, int exponent);
explicit constexpr Number(bool negative, internalrep mantissa, int exponent, unchecked) noexcept;
explicit constexpr Number(
bool negative,
internalrep mantissa,
int exponent,
unchecked) noexcept;
// Assume unsigned values are... unsigned. i.e. positive
explicit constexpr Number(internalrep mantissa, int exponent, unchecked) noexcept;
// Only unit tests are expected to use this ctor
@@ -294,7 +298,8 @@ public:
friend constexpr bool
operator==(Number const& x, Number const& y) noexcept
{
return x.negative_ == y.negative_ && x.mantissa_ == y.mantissa_ && x.exponent_ == y.exponent_;
return x.negative_ == y.negative_ && x.mantissa_ == y.mantissa_ &&
x.exponent_ == y.exponent_;
}
friend constexpr bool
@@ -502,7 +507,11 @@ private:
class Guard;
};
inline constexpr Number::Number(bool negative, internalrep mantissa, int exponent, unchecked) noexcept
inline constexpr Number::Number(
bool negative,
internalrep mantissa,
int exponent,
unchecked) noexcept
: negative_(negative), mantissa_{mantissa}, exponent_{exponent}
{
}
@@ -520,7 +529,8 @@ inline Number::Number(bool negative, internalrep mantissa, int exponent, normali
normalize();
}
inline Number::Number(internalrep mantissa, int exponent, normalized) : Number(false, mantissa, exponent, normalized{})
inline Number::Number(internalrep mantissa, int exponent, normalized)
: Number(false, mantissa, exponent, normalized{})
{
}
@@ -682,8 +692,8 @@ Number::isnormal() const noexcept
MantissaRange const& range = range_;
auto const abs_m = mantissa_;
return *this == Number{} ||
(range.min <= abs_m && abs_m <= range.max && (abs_m <= maxRep || abs_m % 10 == 0) && minExponent <= exponent_ &&
exponent_ <= maxExponent);
(range.min <= abs_m && abs_m <= range.max && (abs_m <= maxRep || abs_m % 10 == 0) &&
minExponent <= exponent_ && exponent_ <= maxExponent);
}
template <Integral64 T>
@@ -695,7 +705,10 @@ Number::normalizeToRange(T minMantissa, T maxMantissa) const
int exponent = exponent_;
if constexpr (std::is_unsigned_v<T>)
XRPL_ASSERT_PARTS(!negative, "xrpl::Number::normalizeToRange", "Number is non-negative for unsigned range.");
XRPL_ASSERT_PARTS(
!negative,
"xrpl::Number::normalizeToRange",
"Number is non-negative for unsigned range.");
Number::normalize(negative, mantissa, exponent, minMantissa, maxMantissa);
auto const sign = negative ? -1 : 1;
@@ -781,7 +794,8 @@ class NumberRoundModeGuard
saveNumberRoundMode saved_;
public:
explicit NumberRoundModeGuard(Number::rounding_mode mode) noexcept : saved_{Number::setround(mode)}
explicit NumberRoundModeGuard(Number::rounding_mode mode) noexcept
: saved_{Number::setround(mode)}
{
}
@@ -801,7 +815,8 @@ class NumberMantissaScaleGuard
MantissaRange::mantissa_scale const saved_;
public:
explicit NumberMantissaScaleGuard(MantissaRange::mantissa_scale scale) noexcept : saved_{Number::getMantissaScale()}
explicit NumberMantissaScaleGuard(MantissaRange::mantissa_scale scale) noexcept
: saved_{Number::getMantissaScale()}
{
Number::setMantissaScale(scale);
}
View File
@@ -19,7 +19,8 @@ SharedWeakCachePointer<T>::SharedWeakCachePointer(SharedWeakCachePointer&& rhs)
template <class T>
template <class TT>
requires std::convertible_to<TT*, T*>
SharedWeakCachePointer<T>::SharedWeakCachePointer(std::shared_ptr<TT>&& rhs) : combo_{std::move(rhs)}
SharedWeakCachePointer<T>::SharedWeakCachePointer(std::shared_ptr<TT>&& rhs)
: combo_{std::move(rhs)}
{
}
View File
@@ -155,13 +155,17 @@ public:
contexts (e.g. when minimal memory usage is needed) and
allows for graceful failure.
*/
constexpr explicit SlabAllocator(std::size_t extra, std::size_t alloc = 0, std::size_t align = 0)
constexpr explicit SlabAllocator(
std::size_t extra,
std::size_t alloc = 0,
std::size_t align = 0)
: itemAlignment_(align ? align : alignof(Type))
, itemSize_(boost::alignment::align_up(sizeof(Type) + extra, itemAlignment_))
, slabSize_(alloc)
{
XRPL_ASSERT(
(itemAlignment_ & (itemAlignment_ - 1)) == 0, "xrpl::SlabAllocator::SlabAllocator : valid alignment");
(itemAlignment_ & (itemAlignment_ - 1)) == 0,
"xrpl::SlabAllocator::SlabAllocator : valid alignment");
}
SlabAllocator(SlabAllocator const& other) = delete;
@@ -228,7 +232,8 @@ public:
// We need to carve out a bit of memory for the slab header
// and then align the rest appropriately:
auto slabData = reinterpret_cast<void*>(reinterpret_cast<std::uint8_t*>(buf) + sizeof(SlabBlock));
auto slabData =
reinterpret_cast<void*>(reinterpret_cast<std::uint8_t*>(buf) + sizeof(SlabBlock));
auto slabSize = size - sizeof(SlabBlock);
// This operation is essentially guaranteed not to fail but
@@ -239,10 +244,12 @@ public:
return nullptr;
}
slab = new (buf) SlabBlock(slabs_.load(), reinterpret_cast<std::uint8_t*>(slabData), slabSize, itemSize_);
slab = new (buf) SlabBlock(
slabs_.load(), reinterpret_cast<std::uint8_t*>(slabData), slabSize, itemSize_);
// Link the new slab
while (!slabs_.compare_exchange_weak(slab->next_, slab, std::memory_order_release, std::memory_order_relaxed))
while (!slabs_.compare_exchange_weak(
slab->next_, slab, std::memory_order_release, std::memory_order_relaxed))
{
; // Nothing to do
}
@@ -299,7 +306,10 @@ public:
std::size_t align;
public:
constexpr SlabConfig(std::size_t extra_, std::size_t alloc_ = 0, std::size_t align_ = alignof(Type))
constexpr SlabConfig(
std::size_t extra_,
std::size_t alloc_ = 0,
std::size_t align_ = alignof(Type))
: extra(extra_), alloc(alloc_), align(align_)
{
}
@@ -309,15 +319,18 @@ public:
{
// Ensure that the specified allocators are sorted from smallest to
// largest by size:
std::sort(
std::begin(cfg), std::end(cfg), [](SlabConfig const& a, SlabConfig const& b) { return a.extra < b.extra; });
std::sort(std::begin(cfg), std::end(cfg), [](SlabConfig const& a, SlabConfig const& b) {
return a.extra < b.extra;
});
// We should never have two slabs of the same size
if (std::adjacent_find(std::begin(cfg), std::end(cfg), [](SlabConfig const& a, SlabConfig const& b) {
return a.extra == b.extra;
}) != cfg.end())
if (std::adjacent_find(
std::begin(cfg), std::end(cfg), [](SlabConfig const& a, SlabConfig const& b) {
return a.extra == b.extra;
}) != cfg.end())
{
throw std::runtime_error("SlabAllocatorSet<" + beast::type_name<Type>() + ">: duplicate slab size");
throw std::runtime_error(
"SlabAllocatorSet<" + beast::type_name<Type>() + ">: duplicate slab size");
}
for (auto const& c : cfg)
View File
@@ -40,7 +40,8 @@ public:
operator=(Slice const&) noexcept = default;
/** Create a slice pointing to existing memory. */
Slice(void const* data, std::size_t size) noexcept : data_(reinterpret_cast<std::uint8_t const*>(data)), size_(size)
Slice(void const* data, std::size_t size) noexcept
: data_(reinterpret_cast<std::uint8_t const*>(data)), size_(size)
{
}
@@ -197,7 +198,8 @@ operator!=(Slice const& lhs, Slice const& rhs) noexcept
inline bool
operator<(Slice const& lhs, Slice const& rhs) noexcept
{
return std::lexicographical_compare(lhs.data(), lhs.data() + lhs.size(), rhs.data(), rhs.data() + rhs.size());
return std::lexicographical_compare(
lhs.data(), lhs.data() + lhs.size(), rhs.data(), rhs.data() + rhs.size());
}
template <class Stream>
View File
@@ -108,7 +108,8 @@ struct parsedURL
bool
operator==(parsedURL const& other) const
{
return scheme == other.scheme && domain == other.domain && port == other.port && path == other.path;
return scheme == other.scheme && domain == other.domain && port == other.port &&
path == other.path;
}
};
View File
@@ -175,7 +175,10 @@ private:
struct Stats
{
template <class Handler>
Stats(std::string const& prefix, Handler const& handler, beast::insight::Collector::ptr const& collector)
Stats(
std::string const& prefix,
Handler const& handler,
beast::insight::Collector::ptr const& collector)
: hook(collector->make_hook(handler))
, size(collector->make_gauge(prefix, "size"))
, hit_rate(collector->make_gauge(prefix, "hit_rate"))
@@ -197,7 +200,8 @@ private:
public:
clock_type::time_point last_access;
explicit KeyOnlyEntry(clock_type::time_point const& last_access_) : last_access(last_access_)
explicit KeyOnlyEntry(clock_type::time_point const& last_access_)
: last_access(last_access_)
{
}
View File
@@ -14,13 +14,22 @@ template <
class Hash,
class KeyEqual,
class Mutex>
inline TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::TaggedCache(
std::string const& name,
int size,
clock_type::duration expiration,
clock_type& clock,
beast::Journal journal,
beast::insight::Collector::ptr const& collector)
inline TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::
TaggedCache(
std::string const& name,
int size,
clock_type::duration expiration,
clock_type& clock,
beast::Journal journal,
beast::insight::Collector::ptr const& collector)
: m_journal(journal)
, m_clock(clock)
, m_stats(name, std::bind(&TaggedCache::collect_metrics, this), collector)
@@ -43,8 +52,8 @@ template <
class KeyEqual,
class Mutex>
inline auto
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::clock()
-> clock_type&
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
clock() -> clock_type&
{
return m_clock;
}
@@ -59,7 +68,8 @@ template <
class KeyEqual,
class Mutex>
inline std::size_t
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::size() const
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
size() const
{
std::lock_guard lock(m_mutex);
return m_cache.size();
@@ -75,7 +85,8 @@ template <
class KeyEqual,
class Mutex>
inline int
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::getCacheSize() const
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
getCacheSize() const
{
std::lock_guard lock(m_mutex);
return m_cache_count;
@@ -91,7 +102,8 @@ template <
class KeyEqual,
class Mutex>
inline int
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::getTrackSize() const
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
getTrackSize() const
{
std::lock_guard lock(m_mutex);
return m_cache.size();
@@ -107,7 +119,8 @@ template <
class KeyEqual,
class Mutex>
inline float
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::getHitRate()
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
getHitRate()
{
std::lock_guard lock(m_mutex);
auto const total = static_cast<float>(m_hits + m_misses);
@@ -124,7 +137,8 @@ template <
class KeyEqual,
class Mutex>
inline void
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::clear()
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
clear()
{
std::lock_guard lock(m_mutex);
m_cache.clear();
@@ -141,7 +155,8 @@ template <
class KeyEqual,
class Mutex>
inline void
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::reset()
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
reset()
{
std::lock_guard lock(m_mutex);
m_cache.clear();
@@ -161,8 +176,8 @@ template <
class Mutex>
template <class KeyComparable>
inline bool
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::touch_if_exists(
KeyComparable const& key)
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
touch_if_exists(KeyComparable const& key)
{
std::lock_guard lock(m_mutex);
auto const iter(m_cache.find(key));
@@ -186,7 +201,8 @@ template <
class KeyEqual,
class Mutex>
inline void
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::sweep()
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
sweep()
{
// Keep references to all the stuff we sweep
// For performance, each worker thread should exit before the swept data
@@ -212,8 +228,9 @@ TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash,
if (when_expire > (now - minimumAge))
when_expire = now - minimumAge;
JLOG(m_journal.trace()) << m_name << " is growing fast " << m_cache.size() << " of " << m_target_size
<< " aging at " << (now - when_expire).count() << " of " << m_target_age.count();
JLOG(m_journal.trace())
<< m_name << " is growing fast " << m_cache.size() << " of " << m_target_size
<< " aging at " << (now - when_expire).count() << " of " << m_target_age.count();
}
std::vector<std::thread> workers;
@@ -222,7 +239,8 @@ TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash,
for (std::size_t p = 0; p < m_cache.partitions(); ++p)
{
workers.push_back(sweepHelper(when_expire, now, m_cache.map()[p], allStuffToSweep[p], allRemovals, lock));
workers.push_back(sweepHelper(
when_expire, now, m_cache.map()[p], allStuffToSweep[p], allRemovals, lock));
}
for (std::thread& worker : workers)
worker.join();
@@ -231,10 +249,11 @@ TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash,
}
// At this point allStuffToSweep will go out of scope outside the lock
// and decrement the reference count on each strong pointer.
JLOG(m_journal.debug())
<< m_name << " TaggedCache sweep lock duration "
<< std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - start).count()
<< "ms";
JLOG(m_journal.debug()) << m_name << " TaggedCache sweep lock duration "
<< std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::steady_clock::now() - start)
.count()
<< "ms";
}
template <
@@ -247,9 +266,8 @@ template <
class KeyEqual,
class Mutex>
inline bool
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::del(
key_type const& key,
bool valid)
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
del(key_type const& key, bool valid)
{
// Remove from cache, if !valid, remove from map too. Returns true if
// removed from cache
@@ -288,10 +306,8 @@ template <
class Mutex>
template <class R>
inline bool
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::canonicalize(
key_type const& key,
SharedPointerType& data,
R&& replaceCallback)
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
canonicalize(key_type const& key, SharedPointerType& data, R&& replaceCallback)
{
// Return canonical value, store if needed, refresh in cache
// Return values: true=we had the data already
@@ -302,7 +318,9 @@ TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash,
if (cit == m_cache.end())
{
m_cache.emplace(
std::piecewise_construct, std::forward_as_tuple(key), std::forward_as_tuple(m_clock.now(), data));
std::piecewise_construct,
std::forward_as_tuple(key),
std::forward_as_tuple(m_clock.now(), data));
++m_cache_count;
return false;
}
@@ -404,8 +422,8 @@ template <
class KeyEqual,
class Mutex>
inline SharedPointerType
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::fetch(
key_type const& key)
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
fetch(key_type const& key)
{
std::lock_guard<mutex_type> l(m_mutex);
auto ret = initialFetch(key, l);
@@ -425,9 +443,8 @@ template <
class Mutex>
template <class ReturnType>
inline auto
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::insert(
key_type const& key,
T const& value) -> std::enable_if_t<!IsKeyCache, ReturnType>
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
insert(key_type const& key, T const& value) -> std::enable_if_t<!IsKeyCache, ReturnType>
{
static_assert(
std::is_same_v<std::shared_ptr<T>, SharedPointerType> ||
@@ -456,13 +473,13 @@ template <
class Mutex>
template <class ReturnType>
inline auto
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::insert(
key_type const& key) -> std::enable_if_t<IsKeyCache, ReturnType>
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
insert(key_type const& key) -> std::enable_if_t<IsKeyCache, ReturnType>
{
std::lock_guard lock(m_mutex);
clock_type::time_point const now(m_clock.now());
auto [it, inserted] =
m_cache.emplace(std::piecewise_construct, std::forward_as_tuple(key), std::forward_as_tuple(now));
auto [it, inserted] = m_cache.emplace(
std::piecewise_construct, std::forward_as_tuple(key), std::forward_as_tuple(now));
if (!inserted)
it->second.last_access = now;
return inserted;
@@ -478,9 +495,8 @@ template <
class KeyEqual,
class Mutex>
inline bool
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::retrieve(
key_type const& key,
T& data)
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
retrieve(key_type const& key, T& data)
{
// retrieve the value of the stored data
auto entry = fetch(key);
@@ -502,8 +518,8 @@ template <
class KeyEqual,
class Mutex>
inline auto
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::peekMutex()
-> mutex_type&
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
peekMutex() -> mutex_type&
{
return m_mutex;
}
@@ -518,8 +534,8 @@ template <
class KeyEqual,
class Mutex>
inline auto
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::getKeys() const
-> std::vector<key_type>
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
getKeys() const -> std::vector<key_type>
{
std::vector<key_type> v;
@@ -543,7 +559,8 @@ template <
class KeyEqual,
class Mutex>
inline double
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::rate() const
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
rate() const
{
std::lock_guard lock(m_mutex);
auto const tot = m_hits + m_misses;
@@ -563,9 +580,8 @@ template <
class Mutex>
template <class Handler>
inline SharedPointerType
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::fetch(
key_type const& digest,
Handler const& h)
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
fetch(key_type const& digest, Handler const& h)
{
{
std::lock_guard l(m_mutex);
@@ -596,9 +612,8 @@ template <
class KeyEqual,
class Mutex>
inline SharedPointerType
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::initialFetch(
key_type const& key,
std::lock_guard<mutex_type> const& l)
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l)
{
auto cit = m_cache.find(key);
if (cit == m_cache.end())
@@ -634,7 +649,8 @@ template <
class KeyEqual,
class Mutex>
inline void
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::collect_metrics()
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
collect_metrics()
{
m_stats.size.set(getCacheSize());
@@ -660,13 +676,14 @@ template <
class KeyEqual,
class Mutex>
inline std::thread
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::sweepHelper(
clock_type::time_point const& when_expire,
[[maybe_unused]] clock_type::time_point const& now,
typename KeyValueCacheType::map_type& partition,
SweptPointersVector& stuffToSweep,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&)
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
sweepHelper(
clock_type::time_point const& when_expire,
[[maybe_unused]] clock_type::time_point const& now,
typename KeyValueCacheType::map_type& partition,
SweptPointersVector& stuffToSweep,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&)
{
return std::thread([&, this]() {
int cacheRemovals = 0;
@@ -720,8 +737,9 @@ TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash,
if (mapRemovals || cacheRemovals)
{
JLOG(m_journal.debug()) << "TaggedCache partition sweep " << m_name << ": cache = " << partition.size()
<< "-" << cacheRemovals << ", map-=" << mapRemovals;
JLOG(m_journal.debug())
<< "TaggedCache partition sweep " << m_name << ": cache = " << partition.size()
<< "-" << cacheRemovals << ", map-=" << mapRemovals;
}
allRemovals += cacheRemovals;
@@ -738,13 +756,14 @@ template <
class KeyEqual,
class Mutex>
inline std::thread
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::sweepHelper(
clock_type::time_point const& when_expire,
clock_type::time_point const& now,
typename KeyOnlyCacheType::map_type& partition,
SweptPointersVector&,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&)
TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash, KeyEqual, Mutex>::
sweepHelper(
clock_type::time_point const& when_expire,
clock_type::time_point const& now,
typename KeyOnlyCacheType::map_type& partition,
SweptPointersVector&,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&)
{
return std::thread([&, this]() {
int cacheRemovals = 0;
@@ -774,8 +793,9 @@ TaggedCache<Key, T, IsKeyCache, SharedWeakUnionPointer, SharedPointerType, Hash,
if (mapRemovals || cacheRemovals)
{
JLOG(m_journal.debug()) << "TaggedCache partition sweep " << m_name << ": cache = " << partition.size()
<< "-" << cacheRemovals << ", map-=" << mapRemovals;
JLOG(m_journal.debug())
<< "TaggedCache partition sweep " << m_name << ": cache = " << partition.size()
<< "-" << cacheRemovals << ", map-=" << mapRemovals;
}
allRemovals += cacheRemovals;
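One detail worth spelling out: sweepHelper only moves expired entries out of the partition while the mutex is held, and the expensive destruction happens after the lock is released (the note about allStuffToSweep above). A standalone sketch of that pattern, with hypothetical names:

#include <map>
#include <memory>
#include <mutex>
#include <vector>

struct MiniCache
{
    struct Entry
    {
        std::shared_ptr<int> data;
        bool expired = false;
    };

    std::mutex mutex;
    std::map<int, Entry> entries;

    void
    sweep()
    {
        // Holds the swept values so their destructors run outside the lock.
        std::vector<std::shared_ptr<int>> swept;
        {
            std::lock_guard lock(mutex);
            for (auto it = entries.begin(); it != entries.end();)
            {
                if (it->second.expired)
                {
                    swept.push_back(std::move(it->second.data));
                    it = entries.erase(it);
                }
                else
                    ++it;
            }
        }
        // swept is destroyed here, dropping the references without holding the mutex.
    }
};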
View File
@@ -51,7 +51,13 @@ generalized_set_intersection(
// std::set_intersection.
template <class FwdIter1, class InputIter2, class Pred, class Comp>
FwdIter1
remove_if_intersect_or_match(FwdIter1 first1, FwdIter1 last1, InputIter2 first2, InputIter2 last2, Pred pred, Comp comp)
remove_if_intersect_or_match(
FwdIter1 first1,
FwdIter1 last1,
InputIter2 first2,
InputIter2 last2,
Pred pred,
Comp comp)
{
// [original-first1, current-first1) is the set of elements to be preserved.
// [current-first1, i) is the set of elements that have been removed.
View File
@@ -214,7 +214,8 @@ private:
std::uint32_t accum = {};
for (std::uint32_t shift : {4u, 0u, 12u, 8u, 20u, 16u, 28u, 24u})
{
if (auto const result = hexCharToUInt(*in++, shift, accum); result != ParseResult::okay)
if (auto const result = hexCharToUInt(*in++, shift, accum);
result != ParseResult::okay)
return Unexpected(result);
}
ret[i++] = accum;
@@ -253,7 +254,8 @@ public:
// This constructor is intended to be used at compile time since it might
// throw at runtime. Consider declaring this constructor consteval once
// we get to C++23.
explicit constexpr base_uint(std::string_view sv) noexcept(false) : data_(parseFromStringViewThrows(sv))
explicit constexpr base_uint(std::string_view sv) noexcept(false)
: data_(parseFromStringViewThrows(sv))
{
}
@@ -442,7 +444,8 @@ public:
for (int i = WIDTH; i--;)
{
std::uint64_t n = carry + boost::endian::big_to_native(data_[i]) + boost::endian::big_to_native(b.data_[i]);
std::uint64_t n = carry + boost::endian::big_to_native(data_[i]) +
boost::endian::big_to_native(b.data_[i]);
data_[i] = boost::endian::native_to_big(static_cast<std::uint32_t>(n));
carry = n >> 32;
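The operator+= hunk above is schoolbook addition over big-endian 32-bit limbs with a 64-bit accumulator carrying between them. A standalone sketch of the same carry propagation (native-endian limbs, purely illustrative):

#include <array>
#include <cstddef>
#include <cstdint>

// Add b into a, most-significant limb first, carrying through a 64-bit accumulator.
template <std::size_t N>
void
addInPlace(std::array<std::uint32_t, N>& a, std::array<std::uint32_t, N> const& b)
{
    std::uint64_t carry = 0;
    for (std::size_t i = N; i-- > 0;)  // walk from the least-significant limb
    {
        std::uint64_t const n = carry + a[i] + b[i];
        a[i] = static_cast<std::uint32_t>(n);  // keep the low 32 bits
        carry = n >> 32;                       // high bits feed the next limb
    }
    // A final non-zero carry is discarded: the sum wraps modulo 2^(32*N).
}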
View File
@@ -15,7 +15,8 @@ namespace xrpl {
// A few handy aliases
using days = std::chrono::duration<int, std::ratio_multiply<std::chrono::hours::period, std::ratio<24>>>;
using days =
std::chrono::duration<int, std::ratio_multiply<std::chrono::hours::period, std::ratio<24>>>;
using weeks = std::chrono::duration<int, std::ratio_multiply<days::period, std::ratio<7>>>;
View File
@@ -35,7 +35,9 @@ template <class E, class... Args>
[[noreturn]] inline void
Throw(Args&&... args)
{
static_assert(std::is_convertible<E*, std::exception*>::value, "Exception must derive from std::exception.");
static_assert(
std::is_convertible<E*, std::exception*>::value,
"Exception must derive from std::exception.");
E e(std::forward<Args>(args)...);
LogThrow(std::string("Throwing exception of type " + beast::type_name<E>() + ": ") + e.what());
View File
@@ -23,7 +23,8 @@ public:
Collection const& collection;
std::string const delimiter;
explicit CollectionAndDelimiter(Collection const& c, std::string delim) : collection(c), delimiter(std::move(delim))
explicit CollectionAndDelimiter(Collection const& c, std::string delim)
: collection(c), delimiter(std::move(delim))
{
}
@@ -63,7 +64,8 @@ public:
char const* collection;
std::string const delimiter;
explicit CollectionAndDelimiter(char const c[N], std::string delim) : collection(c), delimiter(std::move(delim))
explicit CollectionAndDelimiter(char const c[N], std::string delim)
: collection(c), delimiter(std::move(delim))
{
}
View File
@@ -330,8 +330,8 @@ public:
auto const& key = std::get<0>(keyTuple);
iterator it(&map_);
it.ait_ = it.map_->begin() + partitioner(key);
auto [eit, inserted] =
it.ait_->emplace(std::piecewise_construct, std::forward<T>(keyTuple), std::forward<U>(valueTuple));
auto [eit, inserted] = it.ait_->emplace(
std::piecewise_construct, std::forward<T>(keyTuple), std::forward<U>(valueTuple));
it.mit_ = eit;
return {it, inserted};
}
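The emplace above uses the standard std::piecewise_construct protocol, which forwards one argument tuple to the key and one to the mapped value so neither is copied. The same idiom against a plain std::unordered_map, for reference:

#include <string>
#include <tuple>
#include <unordered_map>
#include <utility>

int
main()
{
    std::unordered_map<std::string, std::pair<int, int>> m;
    m.emplace(
        std::piecewise_construct,
        std::forward_as_tuple("answer"),  // arguments for the key's constructor
        std::forward_as_tuple(6, 7));     // arguments for the mapped pair's constructor
    return m.at("answer").first * m.at("answer").second == 42 ? 0 : 1;
}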
View File
@@ -19,7 +19,8 @@ static_assert(
"The Ripple default PRNG engine must return an unsigned integral type.");
static_assert(
std::numeric_limits<beast::xor_shift_engine::result_type>::max() >= std::numeric_limits<std::uint64_t>::max(),
std::numeric_limits<beast::xor_shift_engine::result_type>::max() >=
std::numeric_limits<std::uint64_t>::max(),
"The Ripple default PRNG engine return must be at least 64 bits wide.");
#endif
@@ -144,12 +145,14 @@ std::enable_if_t<
Byte>
rand_byte(Engine& engine)
{
return static_cast<Byte>(
rand_int<Engine, std::uint32_t>(engine, std::numeric_limits<Byte>::min(), std::numeric_limits<Byte>::max()));
return static_cast<Byte>(rand_int<Engine, std::uint32_t>(
engine, std::numeric_limits<Byte>::min(), std::numeric_limits<Byte>::max()));
}
template <class Byte = std::uint8_t>
std::enable_if_t<(std::is_same<Byte, unsigned char>::value || std::is_same<Byte, std::uint8_t>::value), Byte>
std::enable_if_t<
(std::is_same<Byte, unsigned char>::value || std::is_same<Byte, std::uint8_t>::value),
Byte>
rand_byte()
{
return rand_byte<Byte>(default_prng());
View File
@@ -18,9 +18,12 @@ template <class Dest, class Src>
inline constexpr std::enable_if_t<std::is_integral_v<Dest> && std::is_integral_v<Src>, Dest>
safe_cast(Src s) noexcept
{
static_assert(std::is_signed_v<Dest> || std::is_unsigned_v<Src>, "Cannot cast signed to unsigned");
static_assert(
std::is_signed_v<Dest> || std::is_unsigned_v<Src>, "Cannot cast signed to unsigned");
constexpr unsigned not_same = std::is_signed_v<Dest> != std::is_signed_v<Src>;
static_assert(sizeof(Dest) >= sizeof(Src) + not_same, "Destination is too small to hold all values of source");
static_assert(
sizeof(Dest) >= sizeof(Src) + not_same,
"Destination is too small to hold all values of source");
return static_cast<Dest>(s);
}
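The whole of safe_cast is visible in this hunk, so a self-contained copy with two example uses may help; the static_asserts reject narrowing and signed-to-unsigned conversions at compile time.

#include <cstdint>
#include <type_traits>

template <class Dest, class Src>
constexpr std::enable_if_t<std::is_integral_v<Dest> && std::is_integral_v<Src>, Dest>
safe_cast(Src s) noexcept
{
    static_assert(
        std::is_signed_v<Dest> || std::is_unsigned_v<Src>, "Cannot cast signed to unsigned");
    constexpr unsigned not_same = std::is_signed_v<Dest> != std::is_signed_v<Src>;
    static_assert(
        sizeof(Dest) >= sizeof(Src) + not_same,
        "Destination is too small to hold all values of source");
    return static_cast<Dest>(s);
}

static_assert(safe_cast<std::int64_t>(std::uint32_t{7}) == 7);  // widening: allowed
// safe_cast<std::uint32_t>(std::int32_t{-1});                  // rejected at compile time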
View File
@@ -36,7 +36,8 @@ public:
scope_exit(scope_exit&& rhs) noexcept(
std::is_nothrow_move_constructible_v<EF> || std::is_nothrow_copy_constructible_v<EF>)
: exit_function_{std::forward<EF>(rhs.exit_function_)}, execute_on_destruction_{rhs.execute_on_destruction_}
: exit_function_{std::forward<EF>(rhs.exit_function_)}
, execute_on_destruction_{rhs.execute_on_destruction_}
{
rhs.release();
}
@@ -47,8 +48,9 @@ public:
template <class EFP>
explicit scope_exit(
EFP&& f,
std::enable_if_t<!std::is_same_v<std::remove_cv_t<EFP>, scope_exit> && std::is_constructible_v<EF, EFP>>* =
0) noexcept
std::enable_if_t<
!std::is_same_v<std::remove_cv_t<EFP>, scope_exit> &&
std::is_constructible_v<EF, EFP>>* = 0) noexcept
: exit_function_{std::forward<EFP>(f)}
{
static_assert(std::is_nothrow_constructible_v<EF, decltype(std::forward<EFP>(f))>);
@@ -93,8 +95,9 @@ public:
template <class EFP>
explicit scope_fail(
EFP&& f,
std::enable_if_t<!std::is_same_v<std::remove_cv_t<EFP>, scope_fail> && std::is_constructible_v<EF, EFP>>* =
0) noexcept
std::enable_if_t<
!std::is_same_v<std::remove_cv_t<EFP>, scope_fail> &&
std::is_constructible_v<EF, EFP>>* = 0) noexcept
: exit_function_{std::forward<EFP>(f)}
{
static_assert(std::is_nothrow_constructible_v<EF, decltype(std::forward<EFP>(f))>);
@@ -139,7 +142,9 @@ public:
template <class EFP>
explicit scope_success(
EFP&& f,
std::enable_if_t<!std::is_same_v<std::remove_cv_t<EFP>, scope_success> && std::is_constructible_v<EF, EFP>>* =
std::enable_if_t<
!std::is_same_v<std::remove_cv_t<EFP>, scope_success> &&
std::is_constructible_v<EF, EFP>>* =
0) noexcept(std::is_nothrow_constructible_v<EF, EFP> || std::is_nothrow_constructible_v<EF, EFP&>)
: exit_function_{std::forward<EFP>(f)}
{
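For readers skimming past the enable_if noise: scope_exit and its scope_fail/scope_success variants are RAII guards that store a callable and conventionally run it when the scope ends (always, only on exception, or only on success). A trimmed-down standalone guard, not this header's implementation:

#include <cstdio>
#include <utility>

// Minimal "run this when the scope ends" guard.
template <class EF>
class scope_exit_sketch
{
    EF exit_function_;
    bool armed_ = true;

public:
    explicit scope_exit_sketch(EF f) : exit_function_(std::move(f))
    {
    }
    scope_exit_sketch(scope_exit_sketch const&) = delete;
    ~scope_exit_sketch()
    {
        if (armed_)
            exit_function_();
    }
    void
    release() noexcept
    {
        armed_ = false;  // cancel the pending cleanup
    }
};

void
demo()
{
    std::FILE* f = std::fopen("out.txt", "w");
    scope_exit_sketch closer([&] {
        if (f)
            std::fclose(f);
    });
    // ... any early return or exception from here still closes the file ...
}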
View File
@@ -99,9 +99,12 @@ public:
@note For performance reasons, you should strive to have `lock` be
on a cacheline by itself.
*/
packed_spinlock(std::atomic<T>& lock, int index) : bits_(lock), mask_(static_cast<T>(1) << index)
packed_spinlock(std::atomic<T>& lock, int index)
: bits_(lock), mask_(static_cast<T>(1) << index)
{
XRPL_ASSERT(index >= 0 && (mask_ != 0), "xrpl::packed_spinlock::packed_spinlock : valid index and mask");
XRPL_ASSERT(
index >= 0 && (mask_ != 0),
"xrpl::packed_spinlock::packed_spinlock : valid index and mask");
}
[[nodiscard]] bool
@@ -174,7 +177,10 @@ public:
T expected = 0;
return lock_.compare_exchange_weak(
expected, std::numeric_limits<T>::max(), std::memory_order_acquire, std::memory_order_relaxed);
expected,
std::numeric_limits<T>::max(),
std::memory_order_acquire,
std::memory_order_relaxed);
}
void
View File
@@ -10,7 +10,9 @@ std::string
strHex(FwdIt begin, FwdIt end)
{
static_assert(
std::is_convertible<typename std::iterator_traits<FwdIt>::iterator_category, std::forward_iterator_tag>::value,
std::is_convertible<
typename std::iterator_traits<FwdIt>::iterator_category,
std::forward_iterator_tag>::value,
"FwdIt must be a forward iterator");
std::string result;
result.reserve(2 * std::distance(begin, end));
View File
@@ -23,14 +23,15 @@ namespace xrpl {
allowed arithmetic operations.
*/
template <class Int, class Tag>
class tagged_integer
: boost::totally_ordered<
tagged_integer<Int, Tag>,
boost::integer_arithmetic<
tagged_integer<Int, Tag>,
boost::bitwise<
tagged_integer<Int, Tag>,
boost::unit_steppable<tagged_integer<Int, Tag>, boost::shiftable<tagged_integer<Int, Tag>>>>>>
class tagged_integer : boost::totally_ordered<
tagged_integer<Int, Tag>,
boost::integer_arithmetic<
tagged_integer<Int, Tag>,
boost::bitwise<
tagged_integer<Int, Tag>,
boost::unit_steppable<
tagged_integer<Int, Tag>,
boost::shiftable<tagged_integer<Int, Tag>>>>>>
{
private:
Int m_value;
@@ -43,7 +44,8 @@ public:
template <
class OtherInt,
class = typename std::enable_if<std::is_integral<OtherInt>::value && sizeof(OtherInt) <= sizeof(Int)>::type>
class = typename std::enable_if<
std::is_integral<OtherInt>::value && sizeof(OtherInt) <= sizeof(Int)>::type>
explicit constexpr tagged_integer(OtherInt value) noexcept : m_value(value)
{
static_assert(sizeof(tagged_integer) == sizeof(Int), "tagged_integer is adding padding");
View File
@@ -86,7 +86,8 @@ public:
std::lock_guard lock(m_mutex);
if (m_cancel)
throw std::logic_error("io_latency_probe is canceled");
boost::asio::post(m_ios, sample_op<Handler>(std::forward<Handler>(handler), Clock::now(), false, this));
boost::asio::post(
m_ios, sample_op<Handler>(std::forward<Handler>(handler), Clock::now(), false, this));
}
/** Initiate continuous i/o latency sampling.
@@ -100,7 +101,8 @@ public:
std::lock_guard lock(m_mutex);
if (m_cancel)
throw std::logic_error("io_latency_probe is canceled");
boost::asio::post(m_ios, sample_op<Handler>(std::forward<Handler>(handler), Clock::now(), true, this));
boost::asio::post(
m_ios, sample_op<Handler>(std::forward<Handler>(handler), Clock::now(), true, this));
}
private:
@@ -140,7 +142,11 @@ private:
bool m_repeat;
io_latency_probe* m_probe;
sample_op(Handler const& handler, time_point const& start, bool repeat, io_latency_probe* probe)
sample_op(
Handler const& handler,
time_point const& start,
bool repeat,
io_latency_probe* probe)
: m_handler(handler), m_start(start), m_repeat(repeat), m_probe(probe)
{
XRPL_ASSERT(
@@ -203,12 +209,14 @@ private:
// The latency is too high to maintain the desired
// period so don't bother with a timer.
//
boost::asio::post(m_probe->m_ios, sample_op<Handler>(m_handler, now, m_repeat, m_probe));
boost::asio::post(
m_probe->m_ios, sample_op<Handler>(m_handler, now, m_repeat, m_probe));
}
else
{
m_probe->m_timer.expires_after(when - now);
m_probe->m_timer.async_wait(sample_op<Handler>(m_handler, now, m_repeat, m_probe));
m_probe->m_timer.async_wait(
sample_op<Handler>(m_handler, now, m_repeat, m_probe));
}
}
}
@@ -219,7 +227,8 @@ private:
if (!m_probe)
return;
typename Clock::time_point const now(Clock::now());
boost::asio::post(m_probe->m_ios, sample_op<Handler>(m_handler, now, m_repeat, m_probe));
boost::asio::post(
m_probe->m_ios, sample_op<Handler>(m_handler, now, m_repeat, m_probe));
}
};
};
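The sample_op machinery above ultimately measures how long a posted handler sits in the io_context before it runs. A standalone sketch of that single measurement with plain Boost.Asio; the output wording is illustrative:

#include <boost/asio.hpp>

#include <chrono>
#include <iostream>

int
main()
{
    boost::asio::io_context io;
    auto const start = std::chrono::steady_clock::now();

    boost::asio::post(io, [start] {
        auto const waited = std::chrono::steady_clock::now() - start;
        std::cout << "handler waited "
                  << std::chrono::duration_cast<std::chrono::microseconds>(waited).count()
                  << " us\n";
    });

    io.run();  // on a busy io_context the reported wait grows, which is what the probe tracks
    return 0;
}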
View File
@@ -42,7 +42,9 @@ public:
void
set(time_point const& when)
{
XRPL_ASSERT(!Clock::is_steady || when >= now_, "beast::manual_clock::set(time_point) : forward input");
XRPL_ASSERT(
!Clock::is_steady || when >= now_,
"beast::manual_clock::set(time_point) : forward input");
now_ = when;
}
@@ -60,7 +62,8 @@ public:
advance(std::chrono::duration<Rep, Period> const& elapsed)
{
XRPL_ASSERT(
!Clock::is_steady || (now_ + elapsed) >= now_, "beast::manual_clock::advance(duration) : forward input");
!Clock::is_steady || (now_ + elapsed) >= now_,
"beast::manual_clock::advance(duration) : forward input");
now_ += elapsed;
}
View File
@@ -14,7 +14,8 @@ expire(AgedContainer& c, std::chrono::duration<Rep, Period> const& age)
{
std::size_t n(0);
auto const expired(c.clock().now() - age);
for (auto iter(c.chronological.cbegin()); iter != c.chronological.cend() && iter.when() <= expired;)
for (auto iter(c.chronological.cbegin());
iter != c.chronological.cend() && iter.when() <= expired;)
{
iter = c.erase(iter);
++n;
View File
@@ -13,6 +13,6 @@ template <
class Clock = std::chrono::steady_clock,
class Compare = std::less<Key>,
class Allocator = std::allocator<Key>>
using aged_multiset = detail::aged_ordered_container<true, false, Key, void, Clock, Compare, Allocator>;
using aged_multiset =
detail::aged_ordered_container<true, false, Key, void, Clock, Compare, Allocator>;
}
View File
@@ -15,6 +15,6 @@ template <
class Hash = std::hash<Key>,
class KeyEqual = std::equal_to<Key>,
class Allocator = std::allocator<std::pair<Key const, T>>>
using aged_unordered_map = detail::aged_unordered_container<false, true, Key, T, Clock, Hash, KeyEqual, Allocator>;
using aged_unordered_map =
detail::aged_unordered_container<false, true, Key, T, Clock, Hash, KeyEqual, Allocator>;
}
View File
@@ -15,6 +15,6 @@ template <
class Hash = std::hash<Key>,
class KeyEqual = std::equal_to<Key>,
class Allocator = std::allocator<std::pair<Key const, T>>>
using aged_unordered_multimap = detail::aged_unordered_container<true, true, Key, T, Clock, Hash, KeyEqual, Allocator>;
using aged_unordered_multimap =
detail::aged_unordered_container<true, true, Key, T, Clock, Hash, KeyEqual, Allocator>;
}
View File
@@ -14,6 +14,6 @@ template <
class Hash = std::hash<Key>,
class KeyEqual = std::equal_to<Key>,
class Allocator = std::allocator<Key>>
using aged_unordered_set = detail::aged_unordered_container<false, false, Key, void, Clock, Hash, KeyEqual, Allocator>;
using aged_unordered_set =
detail::aged_unordered_container<false, false, Key, void, Clock, Hash, KeyEqual, Allocator>;
}
View File
@@ -35,22 +35,26 @@ public:
class = typename std::enable_if<
(other_is_const == false || is_const == true) &&
std::is_same<Iterator, OtherIterator>::value == false>::type>
explicit aged_container_iterator(aged_container_iterator<other_is_const, OtherIterator> const& other)
explicit aged_container_iterator(
aged_container_iterator<other_is_const, OtherIterator> const& other)
: m_iter(other.m_iter)
{
}
// Disable constructing a const_iterator from a non-const_iterator.
template <bool other_is_const, class = typename std::enable_if<other_is_const == false || is_const == true>::type>
aged_container_iterator(aged_container_iterator<other_is_const, Iterator> const& other) : m_iter(other.m_iter)
template <
bool other_is_const,
class = typename std::enable_if<other_is_const == false || is_const == true>::type>
aged_container_iterator(aged_container_iterator<other_is_const, Iterator> const& other)
: m_iter(other.m_iter)
{
}
// Disable assigning a const_iterator to a non-const iterator
template <bool other_is_const, class OtherIterator>
auto
operator=(aged_container_iterator<other_is_const, OtherIterator> const& other) ->
typename std::enable_if<other_is_const == false || is_const == true, aged_container_iterator&>::type
operator=(aged_container_iterator<other_is_const, OtherIterator> const& other) -> typename std::
enable_if<other_is_const == false || is_const == true, aged_container_iterator&>::type
{
m_iter = other.m_iter;
return *this;
View File
@@ -57,7 +57,8 @@ template <
class T,
class Clock = std::chrono::steady_clock,
class Compare = std::less<Key>,
class Allocator = std::allocator<typename std::conditional<IsMap, std::pair<Key const, T>, Key>::type>>
class Allocator =
std::allocator<typename std::conditional<IsMap, std::pair<Key const, T>, Key>::type>>
class aged_ordered_container
{
public:
@@ -83,8 +84,10 @@ private:
}
// VFALCO TODO hoist to remove template argument dependencies
struct element : boost::intrusive::set_base_hook<boost::intrusive::link_mode<boost::intrusive::normal_link>>,
boost::intrusive::list_base_hook<boost::intrusive::link_mode<boost::intrusive::normal_link>>
struct element : boost::intrusive::set_base_hook<
boost::intrusive::link_mode<boost::intrusive::normal_link>>,
boost::intrusive::list_base_hook<
boost::intrusive::link_mode<boost::intrusive::normal_link>>
{
// Stash types here so the iterator doesn't
// need to see the container declaration.
@@ -100,14 +103,17 @@ private:
{
}
element(time_point const& when_, value_type&& value_) : value(std::move(value_)), when(when_)
element(time_point const& when_, value_type&& value_)
: value(std::move(value_)), when(when_)
{
}
template <
class... Args,
class = typename std::enable_if<std::is_constructible<value_type, Args...>::value>::type>
element(time_point const& when_, Args&&... args) : value(std::forward<Args>(args)...), when(when_)
class =
typename std::enable_if<std::is_constructible<value_type, Args...>::value>::type>
element(time_point const& when_, Args&&... args)
: value(std::forward<Args>(args)...), when(when_)
{
}
@@ -191,7 +197,8 @@ private:
}
};
using list_type = typename boost::intrusive::make_list<element, boost::intrusive::constant_time_size<false>>::type;
using list_type = typename boost::intrusive::
make_list<element, boost::intrusive::constant_time_size<false>>::type;
using cont_type = typename std::conditional<
IsMulti,
@@ -199,15 +206,18 @@ private:
element,
boost::intrusive::constant_time_size<true>,
boost::intrusive::compare<KeyValueCompare>>::type,
typename boost::intrusive::
make_set<element, boost::intrusive::constant_time_size<true>, boost::intrusive::compare<KeyValueCompare>>::
type>::type;
typename boost::intrusive::make_set<
element,
boost::intrusive::constant_time_size<true>,
boost::intrusive::compare<KeyValueCompare>>::type>::type;
using ElementAllocator = typename std::allocator_traits<Allocator>::template rebind_alloc<element>;
using ElementAllocator =
typename std::allocator_traits<Allocator>::template rebind_alloc<element>;
using ElementAllocatorTraits = std::allocator_traits<ElementAllocator>;
class config_t : private KeyValueCompare, public beast::detail::empty_base_optimization<ElementAllocator>
class config_t : private KeyValueCompare,
public beast::detail::empty_base_optimization<ElementAllocator>
{
public:
explicit config_t(clock_type& clock_) : clock(clock_)
@@ -224,7 +234,9 @@ private:
}
config_t(clock_type& clock_, Compare const& comp, Allocator const& alloc_)
: KeyValueCompare(comp), beast::detail::empty_base_optimization<ElementAllocator>(alloc_), clock(clock_)
: KeyValueCompare(comp)
, beast::detail::empty_base_optimization<ElementAllocator>(alloc_)
, clock(clock_)
{
}
@@ -337,7 +349,8 @@ private:
std::unique_ptr<element, Deleter> p(
ElementAllocatorTraits::allocate(m_config.alloc(), 1), Deleter(m_config.alloc()));
ElementAllocatorTraits::construct(m_config.alloc(), p.get(), clock().now(), std::forward<Args>(args)...);
ElementAllocatorTraits::construct(
m_config.alloc(), p.get(), clock().now(), std::forward<Args>(args)...);
return p.release();
}
@@ -368,9 +381,12 @@ public:
// A set iterator (IsMap==false) is always const
// because the elements of a set are immutable.
using iterator = beast::detail::aged_container_iterator<!IsMap, typename cont_type::iterator>;
using const_iterator = beast::detail::aged_container_iterator<true, typename cont_type::iterator>;
using reverse_iterator = beast::detail::aged_container_iterator<!IsMap, typename cont_type::reverse_iterator>;
using const_reverse_iterator = beast::detail::aged_container_iterator<true, typename cont_type::reverse_iterator>;
using const_iterator =
beast::detail::aged_container_iterator<true, typename cont_type::iterator>;
using reverse_iterator =
beast::detail::aged_container_iterator<!IsMap, typename cont_type::reverse_iterator>;
using const_reverse_iterator =
beast::detail::aged_container_iterator<true, typename cont_type::reverse_iterator>;
//--------------------------------------------------------------------------
//
@@ -386,9 +402,12 @@ public:
public:
// A set iterator (IsMap==false) is always const
// because the elements of a set are immutable.
using iterator = beast::detail::aged_container_iterator<!IsMap, typename list_type::iterator>;
using const_iterator = beast::detail::aged_container_iterator<true, typename list_type::iterator>;
using reverse_iterator = beast::detail::aged_container_iterator<!IsMap, typename list_type::reverse_iterator>;
using iterator =
beast::detail::aged_container_iterator<!IsMap, typename list_type::iterator>;
using const_iterator =
beast::detail::aged_container_iterator<true, typename list_type::iterator>;
using reverse_iterator =
beast::detail::aged_container_iterator<!IsMap, typename list_type::reverse_iterator>;
using const_reverse_iterator =
beast::detail::aged_container_iterator<true, typename list_type::reverse_iterator>;
@@ -469,7 +488,8 @@ public:
{
static_assert(std::is_standard_layout<element>::value, "must be standard layout");
return list.iterator_to(*reinterpret_cast<element*>(
reinterpret_cast<uint8_t*>(&value) - ((std::size_t)std::addressof(((element*)0)->member))));
reinterpret_cast<uint8_t*>(&value) -
((std::size_t)std::addressof(((element*)0)->member))));
}
const_iterator
@@ -477,7 +497,8 @@ public:
{
static_assert(std::is_standard_layout<element>::value, "must be standard layout");
return list.iterator_to(*reinterpret_cast<element const*>(
reinterpret_cast<uint8_t const*>(&value) - ((std::size_t)std::addressof(((element*)0)->member))));
reinterpret_cast<uint8_t const*>(&value) -
((std::size_t)std::addressof(((element*)0)->member))));
}
private:
@@ -518,7 +539,12 @@ public:
aged_ordered_container(InputIt first, InputIt last, clock_type& clock, Allocator const& alloc);
template <class InputIt>
aged_ordered_container(InputIt first, InputIt last, clock_type& clock, Compare const& comp, Allocator const& alloc);
aged_ordered_container(
InputIt first,
InputIt last,
clock_type& clock,
Compare const& comp,
Allocator const& alloc);
aged_ordered_container(aged_ordered_container const& other);
@@ -530,9 +556,15 @@ public:
aged_ordered_container(std::initializer_list<value_type> init, clock_type& clock);
aged_ordered_container(std::initializer_list<value_type> init, clock_type& clock, Compare const& comp);
aged_ordered_container(
std::initializer_list<value_type> init,
clock_type& clock,
Compare const& comp);
aged_ordered_container(std::initializer_list<value_type> init, clock_type& clock, Allocator const& alloc);
aged_ordered_container(
std::initializer_list<value_type> init,
clock_type& clock,
Allocator const& alloc);
aged_ordered_container(
std::initializer_list<value_type> init,
@@ -688,7 +720,8 @@ public:
{
static_assert(std::is_standard_layout<element>::value, "must be standard layout");
return m_cont.iterator_to(*reinterpret_cast<element*>(
reinterpret_cast<uint8_t*>(&value) - ((std::size_t)std::addressof(((element*)0)->member))));
reinterpret_cast<uint8_t*>(&value) -
((std::size_t)std::addressof(((element*)0)->member))));
}
const_iterator
@@ -696,7 +729,8 @@ public:
{
static_assert(std::is_standard_layout<element>::value, "must be standard layout");
return m_cont.iterator_to(*reinterpret_cast<element const*>(
reinterpret_cast<uint8_t const*>(&value) - ((std::size_t)std::addressof(((element*)0)->member))));
reinterpret_cast<uint8_t const*>(&value) -
((std::size_t)std::addressof(((element*)0)->member))));
}
//--------------------------------------------------------------------------
@@ -735,7 +769,8 @@ public:
// map, set
template <bool maybe_multi = IsMulti>
auto
insert(value_type const& value) -> typename std::enable_if<!maybe_multi, std::pair<iterator, bool>>::type;
insert(value_type const& value) ->
typename std::enable_if<!maybe_multi, std::pair<iterator, bool>>::type;
// multimap, multiset
template <bool maybe_multi = IsMulti>
@@ -745,19 +780,22 @@ public:
// set
template <bool maybe_multi = IsMulti, bool maybe_map = IsMap>
auto
insert(value_type&& value) -> typename std::enable_if<!maybe_multi && !maybe_map, std::pair<iterator, bool>>::type;
insert(value_type&& value) ->
typename std::enable_if<!maybe_multi && !maybe_map, std::pair<iterator, bool>>::type;
// multiset
template <bool maybe_multi = IsMulti, bool maybe_map = IsMap>
auto
insert(value_type&& value) -> typename std::enable_if<maybe_multi && !maybe_map, iterator>::type;
insert(value_type&& value) ->
typename std::enable_if<maybe_multi && !maybe_map, iterator>::type;
//---
// map, set
template <bool maybe_multi = IsMulti>
auto
insert(const_iterator hint, value_type const& value) -> typename std::enable_if<!maybe_multi, iterator>::type;
insert(const_iterator hint, value_type const& value) ->
typename std::enable_if<!maybe_multi, iterator>::type;
// multimap, multiset
template <bool maybe_multi = IsMulti>
@@ -771,7 +809,8 @@ public:
// map, set
template <bool maybe_multi = IsMulti>
auto
insert(const_iterator hint, value_type&& value) -> typename std::enable_if<!maybe_multi, iterator>::type;
insert(const_iterator hint, value_type&& value) ->
typename std::enable_if<!maybe_multi, iterator>::type;
// multimap, multiset
template <bool maybe_multi = IsMulti>
@@ -819,7 +858,8 @@ public:
// map, set
template <bool maybe_multi = IsMulti, class... Args>
auto
emplace(Args&&... args) -> typename std::enable_if<!maybe_multi, std::pair<iterator, bool>>::type;
emplace(Args&&... args) ->
typename std::enable_if<!maybe_multi, std::pair<iterator, bool>>::type;
// multiset, multimap
template <bool maybe_multi = IsMulti, class... Args>
@@ -842,13 +882,19 @@ public:
}
// enable_if prevents erase (reverse_iterator pos) from compiling
template <bool is_const, class Iterator, class = std::enable_if_t<!is_boost_reverse_iterator<Iterator>::value>>
template <
bool is_const,
class Iterator,
class = std::enable_if_t<!is_boost_reverse_iterator<Iterator>::value>>
beast::detail::aged_container_iterator<false, Iterator>
erase(beast::detail::aged_container_iterator<is_const, Iterator> pos);
// enable_if prevents erase (reverse_iterator first, reverse_iterator last)
// from compiling
template <bool is_const, class Iterator, class = std::enable_if_t<!is_boost_reverse_iterator<Iterator>::value>>
template <
bool is_const,
class Iterator,
class = std::enable_if_t<!is_boost_reverse_iterator<Iterator>::value>>
beast::detail::aged_container_iterator<false, Iterator>
erase(
beast::detail::aged_container_iterator<is_const, Iterator> first,
@@ -864,7 +910,10 @@ public:
//--------------------------------------------------------------------------
// enable_if prevents touch (reverse_iterator pos) from compiling
template <bool is_const, class Iterator, class = std::enable_if_t<!is_boost_reverse_iterator<Iterator>::value>>
template <
bool is_const,
class Iterator,
class = std::enable_if_t<!is_boost_reverse_iterator<Iterator>::value>>
void
touch(beast::detail::aged_container_iterator<is_const, Iterator> pos)
{
@@ -984,69 +1033,136 @@ public:
// is only done on the key portion of the value type, ignoring
// the mapped type.
//
template <bool OtherIsMulti, bool OtherIsMap, class OtherT, class OtherDuration, class OtherAllocator>
template <
bool OtherIsMulti,
bool OtherIsMap,
class OtherT,
class OtherDuration,
class OtherAllocator>
bool
operator==(
aged_ordered_container<OtherIsMulti, OtherIsMap, Key, OtherT, OtherDuration, Compare, OtherAllocator> const&
other) const;
operator==(aged_ordered_container<
OtherIsMulti,
OtherIsMap,
Key,
OtherT,
OtherDuration,
Compare,
OtherAllocator> const& other) const;
template <bool OtherIsMulti, bool OtherIsMap, class OtherT, class OtherDuration, class OtherAllocator>
template <
bool OtherIsMulti,
bool OtherIsMap,
class OtherT,
class OtherDuration,
class OtherAllocator>
bool
operator!=(
aged_ordered_container<OtherIsMulti, OtherIsMap, Key, OtherT, OtherDuration, Compare, OtherAllocator> const&
other) const
operator!=(aged_ordered_container<
OtherIsMulti,
OtherIsMap,
Key,
OtherT,
OtherDuration,
Compare,
OtherAllocator> const& other) const
{
return !(this->operator==(other));
}
template <bool OtherIsMulti, bool OtherIsMap, class OtherT, class OtherDuration, class OtherAllocator>
template <
bool OtherIsMulti,
bool OtherIsMap,
class OtherT,
class OtherDuration,
class OtherAllocator>
bool
operator<(
aged_ordered_container<OtherIsMulti, OtherIsMap, Key, OtherT, OtherDuration, Compare, OtherAllocator> const&
other) const
operator<(aged_ordered_container<
OtherIsMulti,
OtherIsMap,
Key,
OtherT,
OtherDuration,
Compare,
OtherAllocator> const& other) const
{
value_compare const comp(value_comp());
return std::lexicographical_compare(cbegin(), cend(), other.cbegin(), other.cend(), comp);
}
template <bool OtherIsMulti, bool OtherIsMap, class OtherT, class OtherDuration, class OtherAllocator>
template <
bool OtherIsMulti,
bool OtherIsMap,
class OtherT,
class OtherDuration,
class OtherAllocator>
bool
operator<=(
aged_ordered_container<OtherIsMulti, OtherIsMap, Key, OtherT, OtherDuration, Compare, OtherAllocator> const&
other) const
operator<=(aged_ordered_container<
OtherIsMulti,
OtherIsMap,
Key,
OtherT,
OtherDuration,
Compare,
OtherAllocator> const& other) const
{
return !(other < *this);
}
template <bool OtherIsMulti, bool OtherIsMap, class OtherT, class OtherDuration, class OtherAllocator>
template <
bool OtherIsMulti,
bool OtherIsMap,
class OtherT,
class OtherDuration,
class OtherAllocator>
bool
operator>(
aged_ordered_container<OtherIsMulti, OtherIsMap, Key, OtherT, OtherDuration, Compare, OtherAllocator> const&
other) const
operator>(aged_ordered_container<
OtherIsMulti,
OtherIsMap,
Key,
OtherT,
OtherDuration,
Compare,
OtherAllocator> const& other) const
{
return other < *this;
}
template <bool OtherIsMulti, bool OtherIsMap, class OtherT, class OtherDuration, class OtherAllocator>
template <
bool OtherIsMulti,
bool OtherIsMap,
class OtherT,
class OtherDuration,
class OtherAllocator>
bool
operator>=(
aged_ordered_container<OtherIsMulti, OtherIsMap, Key, OtherT, OtherDuration, Compare, OtherAllocator> const&
other) const
operator>=(aged_ordered_container<
OtherIsMulti,
OtherIsMap,
Key,
OtherT,
OtherDuration,
Compare,
OtherAllocator> const& other) const
{
return !(*this < other);
}
private:
// enable_if prevents erase (reverse_iterator pos, now) from compiling
template <bool is_const, class Iterator, class = std::enable_if_t<!is_boost_reverse_iterator<Iterator>::value>>
template <
bool is_const,
class Iterator,
class = std::enable_if_t<!is_boost_reverse_iterator<Iterator>::value>>
void
touch(beast::detail::aged_container_iterator<is_const, Iterator> pos, typename clock_type::time_point const& now);
touch(
beast::detail::aged_container_iterator<is_const, Iterator> pos,
typename clock_type::time_point const& now);
template <bool maybe_propagate = std::allocator_traits<Allocator>::propagate_on_container_swap::value>
template <
bool maybe_propagate = std::allocator_traits<Allocator>::propagate_on_container_swap::value>
typename std::enable_if<maybe_propagate>::type
swap_data(aged_ordered_container& other) noexcept;
template <bool maybe_propagate = std::allocator_traits<Allocator>::propagate_on_container_swap::value>
template <
bool maybe_propagate = std::allocator_traits<Allocator>::propagate_on_container_swap::value>
typename std::enable_if<!maybe_propagate>::type
swap_data(aged_ordered_container& other) noexcept;
@@ -1058,7 +1174,8 @@ private:
//------------------------------------------------------------------------------
template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compare, class Allocator>
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::aged_ordered_container(clock_type& clock)
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::aged_ordered_container(
clock_type& clock)
: m_config(clock)
{
}
@@ -1249,8 +1366,8 @@ aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::opera
template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compare, class Allocator>
auto
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::operator=(aged_ordered_container&& other)
-> aged_ordered_container&
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::operator=(
aged_ordered_container&& other) -> aged_ordered_container&
{
clear();
this->m_config = std::move(other.m_config);
@@ -1296,13 +1413,15 @@ aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::at(K
template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compare, class Allocator>
template <bool maybe_multi, bool maybe_map, class>
typename std::conditional<IsMap, T, void*>::type&
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::operator[](Key const& key)
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::operator[](
Key const& key)
{
typename cont_type::insert_commit_data d;
auto const result(m_cont.insert_check(key, std::cref(m_config.key_compare()), d));
if (result.second)
{
element* const p(new_element(std::piecewise_construct, std::forward_as_tuple(key), std::forward_as_tuple()));
element* const p(new_element(
std::piecewise_construct, std::forward_as_tuple(key), std::forward_as_tuple()));
m_cont.insert_commit(*p, d);
chronological.list.push_back(*p);
return p->value.second;
@@ -1319,8 +1438,10 @@ aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::opera
auto const result(m_cont.insert_check(key, std::cref(m_config.key_compare()), d));
if (result.second)
{
element* const p(
new_element(std::piecewise_construct, std::forward_as_tuple(std::move(key)), std::forward_as_tuple()));
element* const p(new_element(
std::piecewise_construct,
std::forward_as_tuple(std::move(key)),
std::forward_as_tuple()));
m_cont.insert_commit(*p, d);
chronological.list.push_back(*p);
return p->value.second;
@@ -1344,7 +1465,8 @@ aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::clear
template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compare, class Allocator>
template <bool maybe_multi>
auto
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::insert(value_type const& value) ->
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::insert(
value_type const& value) ->
typename std::enable_if<!maybe_multi, std::pair<iterator, bool>>::type
{
typename cont_type::insert_commit_data d;
@@ -1363,8 +1485,8 @@ aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::inser
template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compare, class Allocator>
template <bool maybe_multi>
auto
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::insert(value_type const& value) ->
typename std::enable_if<maybe_multi, iterator>::type
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::insert(
value_type const& value) -> typename std::enable_if<maybe_multi, iterator>::type
{
auto const before(m_cont.upper_bound(extract(value), std::cref(m_config.key_compare())));
element* const p(new_element(value));
@@ -1377,7 +1499,8 @@ aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::inser
template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compare, class Allocator>
template <bool maybe_multi, bool maybe_map>
auto
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::insert(value_type&& value) ->
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::insert(
value_type&& value) ->
typename std::enable_if<!maybe_multi && !maybe_map, std::pair<iterator, bool>>::type
{
typename cont_type::insert_commit_data d;
@@ -1396,8 +1519,8 @@ aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::inser
template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compare, class Allocator>
template <bool maybe_multi, bool maybe_map>
auto
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::insert(value_type&& value) ->
typename std::enable_if<maybe_multi && !maybe_map, iterator>::type
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::insert(
value_type&& value) -> typename std::enable_if<maybe_multi && !maybe_map, iterator>::type
{
auto const before(m_cont.upper_bound(extract(value), std::cref(m_config.key_compare())));
element* const p(new_element(std::move(value)));
@@ -1417,7 +1540,8 @@ aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::inser
value_type const& value) -> typename std::enable_if<!maybe_multi, iterator>::type
{
typename cont_type::insert_commit_data d;
auto const result(m_cont.insert_check(hint.iterator(), extract(value), std::cref(m_config.key_compare()), d));
auto const result(
m_cont.insert_check(hint.iterator(), extract(value), std::cref(m_config.key_compare()), d));
if (result.second)
{
element* const p(new_element(value));
@@ -1437,7 +1561,8 @@ aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::inser
value_type&& value) -> typename std::enable_if<!maybe_multi, iterator>::type
{
typename cont_type::insert_commit_data d;
auto const result(m_cont.insert_check(hint.iterator(), extract(value), std::cref(m_config.key_compare()), d));
auto const result(
m_cont.insert_check(hint.iterator(), extract(value), std::cref(m_config.key_compare()), d));
if (result.second)
{
element* const p(new_element(std::move(value)));
@@ -1452,8 +1577,8 @@ aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::inser
template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compare, class Allocator>
template <bool maybe_multi, class... Args>
auto
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::emplace(Args&&... args) ->
typename std::enable_if<!maybe_multi, std::pair<iterator, bool>>::type
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::emplace(Args&&... args)
-> typename std::enable_if<!maybe_multi, std::pair<iterator, bool>>::type
{
// VFALCO NOTE It's unfortunate that we need to
// construct element here
@@ -1474,8 +1599,8 @@ aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::empla
template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compare, class Allocator>
template <bool maybe_multi, class... Args>
auto
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::emplace(Args&&... args) ->
typename std::enable_if<maybe_multi, iterator>::type
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::emplace(Args&&... args)
-> typename std::enable_if<maybe_multi, iterator>::type
{
element* const p(new_element(std::forward<Args>(args)...));
auto const before(m_cont.upper_bound(extract(p->value), std::cref(m_config.key_compare())));
@@ -1496,7 +1621,8 @@ aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::empla
// construct element here
element* const p(new_element(std::forward<Args>(args)...));
typename cont_type::insert_commit_data d;
auto const result(m_cont.insert_check(hint.iterator(), extract(p->value), std::cref(m_config.key_compare()), d));
auto const result(m_cont.insert_check(
hint.iterator(), extract(p->value), std::cref(m_config.key_compare()), d));
if (result.second)
{
auto const iter(m_cont.insert_commit(*p, d));
@@ -1533,7 +1659,8 @@ aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::erase
template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compare, class Allocator>
template <class K>
auto
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::erase(K const& k) -> size_type
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::erase(K const& k)
-> size_type
{
auto iter(m_cont.find(k, std::cref(m_config.key_compare())));
if (iter == m_cont.end())
@@ -1553,7 +1680,8 @@ aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::erase
template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compare, class Allocator>
void
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::swap(aged_ordered_container& other) noexcept
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::swap(
aged_ordered_container& other) noexcept
{
swap_data(other);
std::swap(chronological, other.chronological);
@@ -1565,7 +1693,8 @@ aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::swap(
template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compare, class Allocator>
template <class K>
auto
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::touch(K const& k) -> size_type
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::touch(K const& k)
-> size_type
{
auto const now(clock().now());
size_type n(0);
@@ -1581,13 +1710,31 @@ aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::touch
//------------------------------------------------------------------------------
template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compare, class Allocator>
template <bool OtherIsMulti, bool OtherIsMap, class OtherT, class OtherDuration, class OtherAllocator>
template <
bool OtherIsMulti,
bool OtherIsMap,
class OtherT,
class OtherDuration,
class OtherAllocator>
bool
aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::operator==(
aged_ordered_container<OtherIsMulti, OtherIsMap, Key, OtherT, OtherDuration, Compare, OtherAllocator> const& other)
const
aged_ordered_container<
OtherIsMulti,
OtherIsMap,
Key,
OtherT,
OtherDuration,
Compare,
OtherAllocator> const& other) const
{
using Other = aged_ordered_container<OtherIsMulti, OtherIsMap, Key, OtherT, OtherDuration, Compare, OtherAllocator>;
using Other = aged_ordered_container<
OtherIsMulti,
OtherIsMap,
Key,
OtherT,
OtherDuration,
Compare,
OtherAllocator>;
if (size() != other.size())
return false;
std::equal_to<void> eq;
@@ -1642,7 +1789,8 @@ aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>::swap_
//------------------------------------------------------------------------------
template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compare, class Allocator>
struct is_aged_container<beast::detail::aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>>
struct is_aged_container<
beast::detail::aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>>
: std::true_type
{
explicit is_aged_container() = default;
@@ -1654,7 +1802,8 @@ template <bool IsMulti, bool IsMap, class Key, class T, class Clock, class Compa
void
swap(
beast::detail::aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>& lhs,
beast::detail::aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>& rhs) noexcept
beast::detail::aged_ordered_container<IsMulti, IsMap, Key, T, Clock, Compare, Allocator>&
rhs) noexcept
{
lhs.swap(rhs);
}
@@ -1677,7 +1826,8 @@ expire(
{
std::size_t n(0);
auto const expired(c.clock().now() - age);
for (auto iter(c.chronological.cbegin()); iter != c.chronological.cend() && iter.when() <= expired;)
for (auto iter(c.chronological.cbegin());
iter != c.chronological.cend() && iter.when() <= expired;)
{
iter = c.erase(iter);
++n;
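Editor's note: the expire() helper above walks the chronological index and erases every entry older than a cutoff, counting what it removed. Below is a minimal standalone sketch of the same idea over a std::multimap keyed by insertion time; the names (expire_older_than, byTime) are illustrative, not the beast container API.

#include <chrono>
#include <cstddef>
#include <map>
#include <string>

using Clock = std::chrono::steady_clock;

// Erase every entry whose timestamp is at or before (now - age).
// Returns the number of erased entries, mirroring expire()'s contract.
std::size_t
expire_older_than(std::multimap<Clock::time_point, std::string>& byTime, Clock::duration age)
{
    std::size_t n = 0;
    auto const cutoff = Clock::now() - age;
    for (auto iter = byTime.begin(); iter != byTime.end() && iter->first <= cutoff;)
    {
        iter = byTime.erase(iter);  // erase returns the iterator to the next entry
        ++n;
    }
    return n;
}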

File diff suppressed because it is too large

View File

@@ -50,7 +50,9 @@ struct LexicalCast<Out, std::string_view>
{
explicit LexicalCast() = default;
static_assert(std::is_integral_v<Out>, "beast::LexicalCast can only be used with integral types");
static_assert(
std::is_integral_v<Out>,
"beast::LexicalCast can only be used with integral types");
template <class Integral = Out>
std::enable_if_t<std::is_integral_v<Integral> && !std::is_same_v<Integral, bool>, bool>

View File

@@ -19,10 +19,12 @@ public:
using iterator_category = std::forward_iterator_tag;
using value_type = typename Container::value_type;
using difference_type = typename Container::difference_type;
using pointer =
typename std::conditional<IsConst, typename Container::const_pointer, typename Container::pointer>::type;
using reference =
typename std::conditional<IsConst, typename Container::const_reference, typename Container::reference>::type;
using pointer = typename std::
conditional<IsConst, typename Container::const_pointer, typename Container::pointer>::type;
using reference = typename std::conditional<
IsConst,
typename Container::const_reference,
typename Container::reference>::type;
LockFreeStackIterator() : m_node()
{
@@ -33,7 +35,8 @@ public:
}
template <bool OtherIsConst>
explicit LockFreeStackIterator(LockFreeStackIterator<Container, OtherIsConst> const& other) : m_node(other.m_node)
explicit LockFreeStackIterator(LockFreeStackIterator<Container, OtherIsConst> const& other)
: m_node(other.m_node)
{
}
@@ -190,7 +193,8 @@ public:
{
first = (old_head == &m_end);
node->m_next = old_head;
} while (!m_head.compare_exchange_strong(old_head, node, std::memory_order_release, std::memory_order_relaxed));
} while (!m_head.compare_exchange_strong(
old_head, node, std::memory_order_release, std::memory_order_relaxed));
return first;
}
@@ -213,7 +217,8 @@ public:
if (node == &m_end)
return nullptr;
new_head = node->m_next.load();
} while (!m_head.compare_exchange_strong(node, new_head, std::memory_order_release, std::memory_order_relaxed));
} while (!m_head.compare_exchange_strong(
node, new_head, std::memory_order_release, std::memory_order_relaxed));
return static_cast<Element*>(node);
}
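Editor's note: the push_front/pop_front loops above retry a compare-exchange until the head pointer is swapped in atomically. A minimal Treiber-stack push sketch showing the same retry pattern; this is a generic example, not the beast::LockFreeStack API.

#include <atomic>

struct Node
{
    Node* next = nullptr;
};

std::atomic<Node*> head{nullptr};

// Publish the node with release semantics. On failure, old_head is reloaded
// with the current head and the loop retries; the diff uses
// compare_exchange_strong, but weak is equally valid inside a retry loop.
void
push(Node* node)
{
    Node* old_head = head.load(std::memory_order_relaxed);
    do
    {
        node->next = old_head;
    } while (!head.compare_exchange_weak(
        old_head, node, std::memory_order_release, std::memory_order_relaxed));
}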

View File

@@ -26,7 +26,8 @@ template <class T>
inline void
reverse_bytes(T& t)
{
unsigned char* bytes = static_cast<unsigned char*>(std::memmove(std::addressof(t), std::addressof(t), sizeof(T)));
unsigned char* bytes =
static_cast<unsigned char*>(std::memmove(std::addressof(t), std::addressof(t), sizeof(T)));
for (unsigned i = 0; i < sizeof(T) / 2; ++i)
std::swap(bytes[i], bytes[sizeof(T) - 1 - i]);
}
@@ -51,7 +52,8 @@ template <class T, class Hasher>
inline void
maybe_reverse_bytes(T& t, Hasher&)
{
maybe_reverse_bytes(t, std::integral_constant<bool, Hasher::endian != boost::endian::order::native>{});
maybe_reverse_bytes(
t, std::integral_constant<bool, Hasher::endian != boost::endian::order::native>{});
}
} // namespace detail
@@ -65,8 +67,9 @@ maybe_reverse_bytes(T& t, Hasher&)
template <class T>
struct is_uniquely_represented
: public std::
integral_constant<bool, std::is_integral<T>::value || std::is_enum<T>::value || std::is_pointer<T>::value>
: public std::integral_constant<
bool,
std::is_integral<T>::value || std::is_enum<T>::value || std::is_pointer<T>::value>
{
explicit is_uniquely_represented() = default;
};
@@ -107,7 +110,8 @@ template <class... T>
struct is_uniquely_represented<std::tuple<T...>>
: public std::integral_constant<
bool,
std::conjunction_v<is_uniquely_represented<T>...> && sizeof(std::tuple<T...>) == (sizeof(T) + ...)>
std::conjunction_v<is_uniquely_represented<T>...> &&
sizeof(std::tuple<T...>) == (sizeof(T) + ...)>
{
explicit is_uniquely_represented() = default;
};
@@ -124,8 +128,9 @@ struct is_uniquely_represented<T[N]> : public is_uniquely_represented<T>
template <class T, std::size_t N>
struct is_uniquely_represented<std::array<T, N>>
: public std::
integral_constant<bool, is_uniquely_represented<T>::value && sizeof(T) * N == sizeof(std::array<T, N>)>
: public std::integral_constant<
bool,
is_uniquely_represented<T>::value && sizeof(T) * N == sizeof(std::array<T, N>)>
{
explicit is_uniquely_represented() = default;
};
@@ -145,10 +150,11 @@ struct is_uniquely_represented<std::array<T, N>>
*/
/** @{ */
template <class T, class HashAlgorithm>
struct is_contiguously_hashable : public std::integral_constant<
bool,
is_uniquely_represented<T>::value &&
(sizeof(T) == 1 || HashAlgorithm::endian == boost::endian::order::native)>
struct is_contiguously_hashable
: public std::integral_constant<
bool,
is_uniquely_represented<T>::value &&
(sizeof(T) == 1 || HashAlgorithm::endian == boost::endian::order::native)>
{
explicit is_contiguously_hashable() = default;
};
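Editor's note: reverse_bytes flips a value's byte order in place so hashing stays endian-stable. A standalone equivalent using std::memcpy instead of the memmove aliasing trick in the diff; the function name is illustrative.

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <type_traits>

// Reverse the byte order of a trivially copyable value.
template <class T>
T
byteswapped(T t)
{
    static_assert(std::is_trivially_copyable_v<T>, "bytewise access requires trivial copyability");
    unsigned char bytes[sizeof(T)];
    std::memcpy(bytes, &t, sizeof(T));
    std::reverse(bytes, bytes + sizeof(T));
    std::memcpy(&t, bytes, sizeof(T));
    return t;
}

// Example: byteswapped<std::uint32_t>(0x11223344u) == 0x44332211u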

View File

@@ -25,7 +25,8 @@ struct ci_equal_pred
operator()(char c1, char c2)
{
// VFALCO TODO Use a table lookup here
return std::tolower(static_cast<unsigned char>(c1)) == std::tolower(static_cast<unsigned char>(c2));
return std::tolower(static_cast<unsigned char>(c1)) ==
std::tolower(static_cast<unsigned char>(c2));
}
};
@@ -169,7 +170,9 @@ split(FwdIt first, FwdIt last, Char delim)
return result;
}
template <class FwdIt, class Result = std::vector<std::basic_string<typename std::iterator_traits<FwdIt>::value_type>>>
template <
class FwdIt,
class Result = std::vector<std::basic_string<typename std::iterator_traits<FwdIt>::value_type>>>
Result
split_commas(FwdIt first, FwdIt last)
{
@@ -356,8 +359,10 @@ bool
is_keep_alive(boost::beast::http::message<isRequest, Body, Fields> const& m)
{
if (m.version() <= 10)
return boost::beast::http::token_list{m[boost::beast::http::field::connection]}.exists("keep-alive");
return !boost::beast::http::token_list{m[boost::beast::http::field::connection]}.exists("close");
return boost::beast::http::token_list{m[boost::beast::http::field::connection]}.exists(
"keep-alive");
return !boost::beast::http::token_list{m[boost::beast::http::field::connection]}.exists(
"close");
}
} // namespace rfc2616
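Editor's note: is_keep_alive encodes the usual HTTP rule — HTTP/1.0 closes by default unless the Connection header lists "keep-alive", while HTTP/1.1 keeps the connection alive unless it lists "close". A hedged sketch of that decision over a pre-parsed, lower-cased token list; the helper below is hypothetical, not the Boost.Beast API.

#include <algorithm>
#include <string>
#include <vector>

// version is 10 for HTTP/1.0 and 11 for HTTP/1.1; tokens holds the
// comma-separated values of the Connection header, already lower-cased.
bool
is_keep_alive(unsigned version, std::vector<std::string> const& tokens)
{
    auto const has = [&](char const* t) {
        return std::find(tokens.begin(), tokens.end(), t) != tokens.end();
    };
    if (version <= 10)
        return has("keep-alive");  // HTTP/1.0: keep-alive is opt-in
    return !has("close");          // HTTP/1.1: close is opt-out
}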

View File

@@ -40,7 +40,8 @@ public:
/// The type of yield context passed to functions.
using yield_context = boost::asio::yield_context;
explicit enable_yield_to(std::size_t concurrency = 1) : work_(boost::asio::make_work_guard(ios_))
explicit enable_yield_to(std::size_t concurrency = 1)
: work_(boost::asio::make_work_guard(ios_))
{
threads_.reserve(concurrency);
while (concurrency--)

View File

@@ -22,7 +22,12 @@ global_suites()
template <class Suite>
struct insert_suite
{
insert_suite(char const* name, char const* module, char const* library, bool manual, int priority)
insert_suite(
char const* name,
char const* module,
char const* library,
bool manual,
int priority)
{
global_suites().insert<Suite>(name, module, library, manual, priority);
}
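Editor's note: insert_suite relies on static registration — a file-scope object whose constructor adds the suite to a global list before main() runs. A stripped-down sketch of the pattern with a hypothetical registry; it is not the beast::unit_test API.

#include <functional>
#include <string>
#include <utility>
#include <vector>

struct Registry
{
    std::vector<std::pair<std::string, std::function<void()>>> entries;
};

// Function-local static avoids the static initialization order fiasco.
inline Registry&
registry()
{
    static Registry r;
    return r;
}

template <class Suite>
struct Registrar
{
    explicit Registrar(char const* name)
    {
        registry().entries.emplace_back(name, [] { Suite{}.run(); });
    }
};

struct MySuite
{
    void run() {}
};

// One static instance per suite performs the registration at program startup.
static Registrar<MySuite> mySuiteRegistrar("MySuite");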

View File

@@ -139,7 +139,10 @@ reporter<_>::results::add(suite_results const& r)
if (elapsed >= std::chrono::seconds{1})
{
auto const iter = std::lower_bound(
top.begin(), top.end(), elapsed, [](run_time const& t1, typename clock_type::duration const& t2) {
top.begin(),
top.end(),
elapsed,
[](run_time const& t1, typename clock_type::duration const& t2) {
return t1.second > t2;
});
if (iter != top.end())
@@ -172,8 +175,9 @@ reporter<_>::~reporter()
os_ << std::setw(8) << fmtdur(i.second) << " " << i.first << '\n';
}
auto const elapsed = clock_type::now() - results_.start;
os_ << fmtdur(elapsed) << ", " << amount{results_.suites, "suite"} << ", " << amount{results_.cases, "case"} << ", "
<< amount{results_.total, "test"} << " total, " << amount{results_.failed, "failure"} << std::endl;
os_ << fmtdur(elapsed) << ", " << amount{results_.suites, "suite"} << ", "
<< amount{results_.cases, "case"} << ", " << amount{results_.total, "test"} << " total, "
<< amount{results_.failed, "failure"} << std::endl;
}
template <class _>
@@ -208,7 +212,8 @@ void
reporter<_>::on_case_begin(std::string const& name)
{
case_results_ = case_results(name);
os_ << suite_results_.name << (case_results_.name.empty() ? "" : (" " + case_results_.name)) << std::endl;
os_ << suite_results_.name << (case_results_.name.empty() ? "" : (" " + case_results_.name))
<< std::endl;
}
template <class _>
@@ -231,7 +236,8 @@ reporter<_>::on_fail(std::string const& reason)
{
++case_results_.failed;
++case_results_.total;
os_ << "#" << case_results_.total << " failed" << (reason.empty() ? "" : ": ") << reason << std::endl;
os_ << "#" << case_results_.total << " failed" << (reason.empty() ? "" : ": ") << reason
<< std::endl;
}
template <class _>
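Editor's note: the reporter keeps its slowest suites in a vector sorted from slowest to fastest and uses std::lower_bound with a descending-order comparator to find the insertion point. A standalone sketch of that bookkeeping; the type and function names are illustrative.

#include <algorithm>
#include <chrono>
#include <cstddef>
#include <string>
#include <utility>
#include <vector>

using duration = std::chrono::steady_clock::duration;
using run_time = std::pair<std::string, duration>;

// Record one timing, keeping at most `cap` entries sorted slowest-first.
void
record(std::vector<run_time>& top, std::string name, duration elapsed, std::size_t cap = 10)
{
    auto const iter = std::lower_bound(
        top.begin(), top.end(), elapsed, [](run_time const& t1, duration const& t2) {
            return t1.second > t2;  // descending by elapsed time
        });
    top.insert(iter, {std::move(name), elapsed});
    if (top.size() > cap)
        top.pop_back();
}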

View File

@@ -91,7 +91,10 @@ private:
}
};
template <class CharT, class Traits = std::char_traits<CharT>, class Allocator = std::allocator<CharT>>
template <
class CharT,
class Traits = std::char_traits<CharT>,
class Allocator = std::allocator<CharT>>
class log_os : public std::basic_ostream<CharT, Traits>
{
log_buf<CharT, Traits, Allocator> buf_;
@@ -562,7 +565,8 @@ suite::run(runner& r)
If the condition is false, the file and line number are reported.
*/
#define BEAST_EXPECTS(cond, reason) ((cond) ? (pass(), true) : (fail((reason), __FILE__, __LINE__), false))
#define BEAST_EXPECTS(cond, reason) \
((cond) ? (pass(), true) : (fail((reason), __FILE__, __LINE__), false))
#endif
} // namespace unit_test
@@ -572,9 +576,9 @@ suite::run(runner& r)
// detail:
// This inserts the suite with the given manual flag
#define BEAST_DEFINE_TESTSUITE_INSERT(Class, Module, Library, manual, priority) \
static beast::unit_test::detail::insert_suite<Class##_test> Library##Module##Class##_test_instance( \
#Class, #Module, #Library, manual, priority)
#define BEAST_DEFINE_TESTSUITE_INSERT(Class, Module, Library, manual, priority) \
static beast::unit_test::detail::insert_suite<Class##_test> \
Library##Module##Class##_test_instance(#Class, #Module, #Library, manual, priority)
//------------------------------------------------------------------------------
@@ -629,7 +633,8 @@ suite::run(runner& r)
#else
#include <xrpl/beast/unit_test/global_suites.h>
#define BEAST_DEFINE_TESTSUITE(Class, Module, Library) BEAST_DEFINE_TESTSUITE_INSERT(Class, Module, Library, false, 0)
#define BEAST_DEFINE_TESTSUITE(Class, Module, Library) \
BEAST_DEFINE_TESTSUITE_INSERT(Class, Module, Library, false, 0)
#define BEAST_DEFINE_TESTSUITE_MANUAL(Class, Module, Library) \
BEAST_DEFINE_TESTSUITE_INSERT(Class, Module, Library, true, 0)
#define BEAST_DEFINE_TESTSUITE_PRIO(Class, Module, Library, Priority) \

View File

@@ -27,7 +27,13 @@ class suite_info
run_type run_;
public:
suite_info(std::string name, std::string module, std::string library, bool manual, int priority, run_type run)
suite_info(
std::string name,
std::string module,
std::string library,
bool manual,
int priority,
run_type run)
: name_(std::move(name))
, module_(std::move(module))
, library_(std::move(library))
@@ -91,10 +97,17 @@ public:
/// Convenience for producing suite_info for a given test type.
template <class Suite>
suite_info
make_suite_info(std::string name, std::string module, std::string library, bool manual, int priority)
make_suite_info(
std::string name,
std::string module,
std::string library,
bool manual,
int priority)
{
return suite_info(
std::move(name), std::move(module), std::move(library), manual, priority, [](runner& r) { Suite{}(r); });
std::move(name), std::move(module), std::move(library), manual, priority, [](runner& r) {
Suite{}(r);
});
}
} // namespace unit_test

View File

@@ -39,7 +39,12 @@ public:
template <class Suite>
void
suite_list::insert(char const* name, char const* module, char const* library, bool manual, int priority)
suite_list::insert(
char const* name,
char const* module,
char const* library,
bool manual,
int priority)
{
#ifndef NDEBUG
{

View File

@@ -190,7 +190,8 @@ public:
*/
Stream(Sink& sink, Severity level) : m_sink(sink), m_level(level)
{
XRPL_ASSERT(m_level < severities::kDisabled, "beast::Journal::Stream::Stream : maximum level");
XRPL_ASSERT(
m_level < severities::kDisabled, "beast::Journal::Stream::Stream : maximum level");
}
/** Construct or copy another Stream. */
@@ -421,7 +422,8 @@ class basic_logstream : public std::basic_ostream<CharT, Traits>
detail::logstream_buf<CharT, Traits> buf_;
public:
explicit basic_logstream(beast::Journal::Stream const& strm) : std::basic_ostream<CharT, Traits>(&buf_), buf_(strm)
explicit basic_logstream(beast::Journal::Stream const& strm)
: std::basic_ostream<CharT, Traits>(&buf_), buf_(strm)
{
}
};

View File

@@ -19,7 +19,8 @@
#endif
#define XRPL_ASSERT ALWAYS_OR_UNREACHABLE
#define XRPL_ASSERT_PARTS(cond, function, description, ...) XRPL_ASSERT(cond, function " : " description)
#define XRPL_ASSERT_PARTS(cond, function, description, ...) \
XRPL_ASSERT(cond, function " : " description)
// How to use the instrumentation macros:
//

View File

@@ -9,8 +9,10 @@ template <bool IsConst, class T>
struct maybe_const
{
explicit maybe_const() = default;
using type = typename std::
conditional<IsConst, typename std::remove_const<T>::type const, typename std::remove_const<T>::type>::type;
using type = typename std::conditional<
IsConst,
typename std::remove_const<T>::type const,
typename std::remove_const<T>::type>::type;
};
/** Alias for omitting `typename`. */
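Editor's note: maybe_const adds const to T only when IsConst is true. The same trait can be written directly with std::conditional_t; a small sketch with static_asserts, where the alias name maybe_const_t is illustrative.

#include <type_traits>

template <bool IsConst, class T>
using maybe_const_t = std::conditional_t<
    IsConst,
    std::remove_const_t<T> const,
    std::remove_const_t<T>>;

static_assert(std::is_same_v<maybe_const_t<true, int>, int const>);
static_assert(std::is_same_v<maybe_const_t<false, int const>, int>);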

View File

@@ -35,7 +35,10 @@ rngfill(void* const buffer, std::size_t const bytes, Generator& g)
}
}
template <class Generator, std::size_t N, class = std::enable_if_t<N % sizeof(typename Generator::result_type) == 0>>
template <
class Generator,
std::size_t N,
class = std::enable_if_t<N % sizeof(typename Generator::result_type) == 0>>
void
rngfill(std::array<std::uint8_t, N>& a, Generator& g)
{
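Editor's note: rngfill fills a byte buffer by repeatedly drawing Generator::result_type values and copying their bytes in; the enable_if above restricts the array overload to sizes that are whole multiples of the word size. A minimal sketch of that whole-word case; the function name is illustrative, not the exact beast implementation.

#include <array>
#include <cstdint>
#include <cstring>
#include <random>

// Fill `a` with random bytes, one generator word at a time.
// N must be a multiple of sizeof(Generator::result_type).
template <class Generator, std::size_t N>
void
fill_random(std::array<std::uint8_t, N>& a, Generator& g)
{
    using word = typename Generator::result_type;
    static_assert(N % sizeof(word) == 0, "buffer size must be word-aligned");
    for (std::size_t i = 0; i < N; i += sizeof(word))
    {
        word const v = g();
        std::memcpy(a.data() + i, &v, sizeof(word));
    }
}

// Usage: std::mt19937 g; std::array<std::uint8_t, 32> key; fill_random(key, g);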

View File

@@ -76,7 +76,8 @@ inline bool
operator==(Fulfillment const& lhs, Fulfillment const& rhs)
{
// FIXME: for compound conditions, need to also check subtypes
return lhs.type() == rhs.type() && lhs.cost() == rhs.cost() && lhs.fingerprint() == rhs.fingerprint();
return lhs.type() == rhs.type() && lhs.cost() == rhs.cost() &&
lhs.fingerprint() == rhs.fingerprint();
}
inline bool

View File

@@ -11,7 +11,8 @@ JobQueue::Coro::Coro(Coro_create_t, JobQueue& jq, JobType type, std::string cons
, name_(name)
, running_(false)
, coro_(
[this, fn = std::forward<F>(f)](boost::coroutines::asymmetric_coroutine<void>::push_type& do_yield) {
[this, fn = std::forward<F>(f)](
boost::coroutines::asymmetric_coroutine<void>::push_type& do_yield) {
yield_ = &do_yield;
yield();
fn(shared_from_this());

View File

@@ -210,7 +210,11 @@ public:
// Add a peer suppression and return whether the entry should be processed
bool
shouldProcess(uint256 const& key, PeerShortID peer, HashRouterFlags& flags, std::chrono::seconds tx_interval);
shouldProcess(
uint256 const& key,
PeerShortID peer,
HashRouterFlags& flags,
std::chrono::seconds tx_interval);
/** Set the flags on a hash.
@@ -248,7 +252,8 @@ private:
Setup const setup_;
// Stores all suppressed hashes and their expiration time
beast::aged_unordered_map<uint256, Entry, Stopwatch::clock_type, hardened_hash<strong_hash>> suppressionMap_;
beast::aged_unordered_map<uint256, Entry, Stopwatch::clock_type, hardened_hash<strong_hash>>
suppressionMap_;
};
} // namespace xrpl

View File

@@ -92,7 +92,11 @@ public:
Job(JobType type, std::uint64_t index);
// VFALCO TODO try to remove the dependency on LoadMonitor.
Job(JobType type, std::string const& name, std::uint64_t index, LoadMonitor& lm, std::function<void()> const& job);
Job(JobType type,
std::string const& name,
std::uint64_t index,
LoadMonitor& lm,
std::function<void()> const& job);
JobType
getType() const;

View File

@@ -140,7 +140,8 @@ public:
*/
template <
typename JobHandler,
typename = std::enable_if_t<std::is_same<decltype(std::declval<JobHandler&&>()()), void>::value>>
typename =
std::enable_if_t<std::is_same<decltype(std::declval<JobHandler&&>()()), void>::value>>
bool
addJob(JobType type, std::string const& name, JobHandler&& jobHandler)
{
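Editor's note: addJob uses enable_if so only handlers whose call expression returns void participate in overload resolution. The same constraint in isolation, expressed with std::is_void_v over std::invoke_result_t; this is a sketch, not the JobQueue API.

#include <type_traits>
#include <utility>

// Accepts only callables invocable with no arguments that return void.
template <
    class Handler,
    class = std::enable_if_t<std::is_void_v<std::invoke_result_t<Handler&&>>>>
void
post(Handler&& handler)
{
    std::forward<Handler>(handler)();
}

// post([] {});            // OK: the lambda returns void
// post([] { return 1; }); // error: no matching overload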

View File

@@ -31,8 +31,16 @@ public:
beast::insight::Event dequeue;
beast::insight::Event execute;
JobTypeData(JobTypeInfo const& info_, beast::insight::Collector::ptr const& collector, Logs& logs) noexcept
: m_load(logs.journal("LoadMonitor")), m_collector(collector), info(info_), waiting(0), running(0), deferred(0)
JobTypeData(
JobTypeInfo const& info_,
beast::insight::Collector::ptr const& collector,
Logs& logs) noexcept
: m_load(logs.journal("LoadMonitor"))
, m_collector(collector)
, info(info_)
, waiting(0)
, running(0)
, deferred(0)
{
m_load.setTargetLatency(info.getAverageLatency(), info.getPeakLatency());

View File

@@ -32,7 +32,11 @@ public:
int limit,
std::chrono::milliseconds avgLatency,
std::chrono::milliseconds peakLatency)
: m_type(type), m_name(std::move(name)), m_limit(limit), m_avgLatency(avgLatency), m_peakLatency(peakLatency)
: m_type(type)
, m_name(std::move(name))
, m_limit(limit)
, m_avgLatency(avgLatency)
, m_peakLatency(peakLatency)
{
}

View File

@@ -15,7 +15,13 @@ public:
using const_iterator = Map::const_iterator;
private:
JobTypes() : m_unknown(jtINVALID, "invalid", 0, std::chrono::milliseconds{0}, std::chrono::milliseconds{0})
JobTypes()
: m_unknown(
jtINVALID,
"invalid",
0,
std::chrono::milliseconds{0},
std::chrono::milliseconds{0})
{
using namespace std::chrono_literals;
int maxLimit = std::numeric_limits<int>::max();
@@ -26,7 +32,9 @@ private:
int limit,
std::chrono::milliseconds avgLatency,
std::chrono::milliseconds peakLatency) {
XRPL_ASSERT(m_map.find(jt) == m_map.end(), "xrpl::JobTypes::JobTypes::add : unique job type input");
XRPL_ASSERT(
m_map.find(jt) == m_map.end(),
"xrpl::JobTypes::JobTypes::add : unique job type input");
[[maybe_unused]] auto const inserted =
m_map

View File

@@ -55,7 +55,8 @@ struct KeyEqual final
class PeerReservationTable final
{
public:
explicit PeerReservationTable(beast::Journal journal = beast::Journal(beast::Journal::getNullSink()))
explicit PeerReservationTable(
beast::Journal journal = beast::Journal(beast::Journal::getNullSink()))
: journal_(journal)
{
}

View File

@@ -151,7 +151,11 @@ PerfLog::Setup
setup_PerfLog(Section const& section, boost::filesystem::path const& configDir);
std::unique_ptr<PerfLog>
make_PerfLog(PerfLog::Setup const& setup, Application& app, beast::Journal journal, std::function<void()>&& signalStop);
make_PerfLog(
PerfLog::Setup const& setup,
Application& app,
beast::Journal journal,
std::function<void()>&& signalStop);
template <typename Func, class Rep, class Period>
auto

View File

@@ -160,7 +160,8 @@ private:
Idle: Active, but blocked on waiting for a task.
Paused: Blocked waiting to exit or become active.
*/
class Worker : public beast::LockFreeStack<Worker>::Node, public beast::LockFreeStack<Worker, PausedTag>::Node
class Worker : public beast::LockFreeStack<Worker>::Node,
public beast::LockFreeStack<Worker, PausedTag>::Node
{
public:
Worker(Workers& workers, std::string const& threadName, int const instance);

View File

@@ -145,7 +145,11 @@ private:
bool
decodeUnicodeCodePoint(Token& token, Location& current, Location end, unsigned int& unicode);
bool
decodeUnicodeEscapeSequence(Token& token, Location& current, Location end, unsigned int& unicode);
decodeUnicodeEscapeSequence(
Token& token,
Location& current,
Location end,
unsigned int& unicode);
bool
addError(std::string const& message, Token& token, Location extra = 0);
bool

View File

@@ -315,7 +315,8 @@ public:
operator<<(std::ostream& o, Compact const& cJv)
{
detail::write_value(
[&o](void const* data, std::size_t n) { o.write(static_cast<char const*>(data), n); }, cJv.jv_);
[&o](void const* data, std::size_t n) { o.write(static_cast<char const*>(data), n); },
cJv.jv_);
return o;
}
};

View File

@@ -1,17 +1,18 @@
#pragma once
#include <xrpld/app/ledger/Ledger.h>
#include <xrpld/core/ConfigSections.h>
#include <xrpl/core/ServiceRegistry.h>
#include <xrpl/ledger/ReadView.h>
#include <xrpl/ledger/View.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/Protocol.h>
#include <xrpl/protocol/STValidation.h>
#include <xrpl/shamap/SHAMap.h>
#include <optional>
namespace xrpl {
class ServiceRegistry;
/** The amendment table stores the list of enabled and potential amendments.
Individual amendments are voted on by validators during the consensus
process.
@@ -22,7 +23,8 @@ public:
struct FeatureInfo
{
FeatureInfo() = delete;
FeatureInfo(std::string const& n, uint256 const& f, VoteBehavior v) : name(n), feature(f), vote(v)
FeatureInfo(std::string const& n, uint256 const& f, VoteBehavior v)
: name(n), feature(f), vote(v)
{
}
@@ -153,11 +155,13 @@ public:
Serializer s;
amendTx.add(s);
JLOG(j.debug()) << "Amendments: Adding pseudo-transaction: " << amendTx.getTransactionID() << ": "
<< strHex(s.slice()) << ": " << amendTx;
JLOG(j.debug()) << "Amendments: Adding pseudo-transaction: "
<< amendTx.getTransactionID() << ": " << strHex(s.slice()) << ": "
<< amendTx;
initialPosition->addGiveItem(
SHAMapNodeType::tnTRANSACTION_NM, make_shamapitem(amendTx.getTransactionID(), s.slice()));
SHAMapNodeType::tnTRANSACTION_NM,
make_shamapitem(amendTx.getTransactionID(), s.slice()));
}
}
};

View File

@@ -33,7 +33,8 @@ constexpr ApplyFlags
operator|(ApplyFlags const& lhs, ApplyFlags const& rhs)
{
return safe_cast<ApplyFlags>(
safe_cast<std::underlying_type_t<ApplyFlags>>(lhs) | safe_cast<std::underlying_type_t<ApplyFlags>>(rhs));
safe_cast<std::underlying_type_t<ApplyFlags>>(lhs) |
safe_cast<std::underlying_type_t<ApplyFlags>>(rhs));
}
static_assert((tapFAIL_HARD | tapRETRY) == safe_cast<ApplyFlags>(0x30u), "ApplyFlags operator |");
@@ -43,7 +44,8 @@ constexpr ApplyFlags
operator&(ApplyFlags const& lhs, ApplyFlags const& rhs)
{
return safe_cast<ApplyFlags>(
safe_cast<std::underlying_type_t<ApplyFlags>>(lhs) & safe_cast<std::underlying_type_t<ApplyFlags>>(rhs));
safe_cast<std::underlying_type_t<ApplyFlags>>(lhs) &
safe_cast<std::underlying_type_t<ApplyFlags>>(rhs));
}
static_assert((tapFAIL_HARD & tapRETRY) == tapNONE, "ApplyFlags operator &");
@@ -211,7 +213,11 @@ public:
// Called when a credit is made to an account
// This is required to support PaymentSandbox
virtual void
creditHook(AccountID const& from, AccountID const& to, STAmount const& amount, STAmount const& preCreditBalance)
creditHook(
AccountID const& from,
AccountID const& to,
STAmount const& amount,
STAmount const& preCreditBalance)
{
}
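Editor's note: ApplyFlags defines | and & by round-tripping through the underlying integer type so the flag enum stays type-safe and the operators remain constexpr. A standalone sketch of that pattern with a hypothetical Flags enum; the names and values are illustrative.

#include <type_traits>

enum class Flags : unsigned { none = 0x0, failHard = 0x10, retry = 0x20 };

constexpr Flags
operator|(Flags lhs, Flags rhs)
{
    using U = std::underlying_type_t<Flags>;
    return static_cast<Flags>(static_cast<U>(lhs) | static_cast<U>(rhs));
}

constexpr Flags
operator&(Flags lhs, Flags rhs)
{
    using U = std::underlying_type_t<Flags>;
    return static_cast<Flags>(static_cast<U>(lhs) & static_cast<U>(rhs));
}

static_assert((Flags::failHard | Flags::retry) == static_cast<Flags>(0x30u));
static_assert((Flags::failHard & Flags::retry) == Flags::none);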

View File

@@ -33,7 +33,13 @@ public:
destructor.
*/
std::optional<TxMeta>
apply(OpenView& to, STTx const& tx, TER ter, std::optional<uint256> parentBatchId, bool isDryRun, beast::Journal j);
apply(
OpenView& to,
STTx const& tx,
TER ter,
std::optional<uint256> parentBatchId,
bool isDryRun,
beast::Journal j);
/** Set the amount of currency delivered.

View File

@@ -15,7 +15,11 @@ namespace xrpl {
*/
/** @{ */
STAmount
creditLimit(ReadView const& view, AccountID const& account, AccountID const& issuer, Currency const& currency);
creditLimit(
ReadView const& view,
AccountID const& account,
AccountID const& issuer,
Currency const& currency);
IOUAmount
creditLimit2(ReadView const& v, AccountID const& acc, AccountID const& iss, Currency const& cur);
@@ -29,7 +33,11 @@ creditLimit2(ReadView const& v, AccountID const& acc, AccountID const& iss, Curr
*/
/** @{ */
STAmount
creditBalance(ReadView const& view, AccountID const& account, AccountID const& issuer, Currency const& currency);
creditBalance(
ReadView const& view,
AccountID const& account,
AccountID const& issuer,
Currency const& currency);
/** @} */
} // namespace xrpl

View File

@@ -57,7 +57,9 @@ private:
std::shared_ptr<Serializer const> meta;
// Constructor needed for emplacement in std::map
txData(std::shared_ptr<Serializer const> const& txn_, std::shared_ptr<Serializer const> const& meta_)
txData(
std::shared_ptr<Serializer const> const& txn_,
std::shared_ptr<Serializer const> const& meta_)
: txn(txn_), meta(meta_)
{
}
@@ -130,7 +132,11 @@ public:
The tx list starts empty and will contain
all newly inserted tx.
*/
OpenView(open_ledger_t, ReadView const* base, Rules const& rules, std::shared_ptr<void const> hold = nullptr);
OpenView(
open_ledger_t,
ReadView const* base,
Rules const& rules,
std::shared_ptr<void const> hold = nullptr);
OpenView(open_ledger_t, Rules const& rules, std::shared_ptr<ReadView const> const& base)
: OpenView(open_ledger, &*base, rules, base)

View File

@@ -18,7 +18,8 @@ class DeferredCredits
public:
struct Adjustment
{
Adjustment(STAmount const& d, STAmount const& c, STAmount const& b) : debits(d), credits(c), origBalance(b)
Adjustment(STAmount const& d, STAmount const& c, STAmount const& b)
: debits(d), credits(c), origBalance(b)
{
}
STAmount debits;
@@ -119,7 +120,8 @@ public:
// or a PaymentSandbox-derived class, we MUST go through
// one of these constructors or invariants will be broken.
/** @{ */
explicit PaymentSandbox(PaymentSandbox const* base) : ApplyViewBase(base, base->flags()), ps_(base)
explicit PaymentSandbox(PaymentSandbox const* base)
: ApplyViewBase(base, base->flags()), ps_(base)
{
}
@@ -129,11 +131,15 @@ public:
/** @} */
STAmount
balanceHook(AccountID const& account, AccountID const& issuer, STAmount const& amount) const override;
balanceHook(AccountID const& account, AccountID const& issuer, STAmount const& amount)
const override;
void
creditHook(AccountID const& from, AccountID const& to, STAmount const& amount, STAmount const& preCreditBalance)
override;
creditHook(
AccountID const& from,
AccountID const& to,
STAmount const& amount,
STAmount const& preCreditBalance) override;
void
adjustOwnerCountHook(AccountID const& account, std::uint32_t cur, std::uint32_t next) override;

View File

@@ -247,7 +247,9 @@ Rules
makeRulesGivenLedger(DigestAwareReadView const& ledger, Rules const& current);
Rules
makeRulesGivenLedger(DigestAwareReadView const& ledger, std::unordered_set<uint256, beast::uhash<>> const& presets);
makeRulesGivenLedger(
DigestAwareReadView const& ledger,
std::unordered_set<uint256, beast::uhash<>> const& presets);
} // namespace xrpl

Some files were not shown because too many files have changed in this diff.