Compare commits

..

25 Commits

Author SHA1 Message Date
JCW
52becffa48 Fix issues 2025-08-28 23:04:56 +01:00
JCW
b5c4fd4c51 Fix issues 2025-08-28 22:41:41 +01:00
JCW
ffa323808d Fix issues 2025-08-28 21:55:14 +01:00
JCW
e7e800197e Performance improvement 2025-08-28 20:36:46 +01:00
JCW
6e35bb91ec Hardcode the log style as json
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-08-27 16:04:14 +01:00
JCW
276c02197f Fix PR comments
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-08-27 16:04:14 +01:00
Jingchen
fb228860c8 Update include/xrpl/basics/Log.h
Co-authored-by: Vito Tumas <5780819+Tapanito@users.noreply.github.com>
2025-08-27 16:04:14 +01:00
JCW
b6c2b5cec5 Fix formatting
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-08-27 16:04:14 +01:00
JCW
516271e8fc Improve coverage
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-08-27 16:04:13 +01:00
JCW
0d87dfbdb4 Remove unneeded file
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-08-27 16:04:13 +01:00
JCW
f7b00a929b Fix errors
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-08-27 16:04:13 +01:00
JCW
9b3dd2c3b2 Fix errors
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-08-27 16:04:13 +01:00
JCW
1a159e040e Fix errors
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-08-27 16:04:13 +01:00
JCW
56964984a5 Fix errors
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-08-27 16:04:13 +01:00
JCW
0b31d52896 Fix formatting
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-08-27 16:04:13 +01:00
JCW
5cf589af16 Fix errors
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-08-27 16:04:12 +01:00
JCW
2754c6343b Fix formatting
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-08-27 16:04:12 +01:00
JCW
98bc036d1f Fix to_string error
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-08-27 16:04:12 +01:00
JCW
429617e1ca Fix errors
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-08-27 16:04:12 +01:00
JCW
a513f95fb5 Fix formatting
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-08-27 16:04:12 +01:00
JCW
3740308b61 Support structured logs
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-08-27 16:04:12 +01:00
JCW
f1625c9802 Support structured logs
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-08-27 16:04:12 +01:00
JCW
73bc28bf4f Support structured logs
Signed-off-by: JCW <a1q123456@users.noreply.github.com>
2025-08-27 16:04:11 +01:00
Bart
1240bae12b Update Conan dependencies: OpenSSL (#5617)
This change updates OpenSSL from 1.1.1w to 3.5.2. The code works as-is, but many functions have been marked as deprecated and thus will need to be rewritten. For now we explicitly add the `-DOPENSSL_SUPPRESS_DEPRECATED` to give us time to do so, while providing us with the benefits of the updated version.
2025-08-27 16:04:11 +01:00
Jingchen
ceb0ce5634 refactor: Decouple net from xrpld and move rpc-related classes to the rpc folder (#5477)
As a step of modularisation, this change moves code from `xrpld` to `libxrpl`.
2025-08-15 23:27:13 +00:00
84 changed files with 1996 additions and 919 deletions

91
.github/workflows/libxrpl.yml vendored Normal file
View File

@@ -0,0 +1,91 @@
name: Check libXRPL compatibility with Clio

env:
  CONAN_REMOTE_URL: https://conan.ripplex.io
  # `conan user` picks these up implicitly as CONAN_LOGIN_USERNAME_<REMOTE> /
  # CONAN_PASSWORD_<REMOTE> for the remote named "xrplf".
  CONAN_LOGIN_USERNAME_XRPLF: ${{ secrets.CONAN_REMOTE_USERNAME }}
  CONAN_PASSWORD_XRPLF: ${{ secrets.CONAN_REMOTE_PASSWORD }}

on:
  pull_request:
    paths:
      # Only run when the published package version can change, or when this
      # workflow itself changes.
      - "src/libxrpl/protocol/BuildInfo.cpp"
      - ".github/workflows/libxrpl.yml"
    types: [opened, reopened, synchronize, ready_for_review]

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  publish:
    if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
    name: Publish libXRPL
    outputs:
      outcome: ${{ steps.upload.outputs.outcome }}
      version: ${{ steps.version.outputs.version }}
      channel: ${{ steps.channel.outputs.channel }}
    runs-on: [self-hosted, heavy]
    container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e
    steps:
      - name: Wait for essential checks to succeed
        uses: lewagon/wait-on-check-action@v1.3.4
        with:
          ref: ${{ github.event.pull_request.head.sha || github.sha }}
          running-workflow-name: wait-for-check-regexp
          check-regexp: "(dependencies|test).*linux.*" # Ignore windows and mac tests but make sure linux passes
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          wait-interval: 10
      - name: Checkout
        uses: actions/checkout@v4
      - name: Generate channel
        id: channel
        shell: bash
        run: |
          echo channel="clio/pr_${{ github.event.pull_request.number }}" | tee ${GITHUB_OUTPUT}
      - name: Export new package
        shell: bash
        run: |
          conan export . ${{ steps.channel.outputs.channel }}
      - name: Add Conan remote
        shell: bash
        run: |
          echo "Adding Conan remote 'xrplf' at ${{ env.CONAN_REMOTE_URL }}."
          conan remote add xrplf ${{ env.CONAN_REMOTE_URL }} --insert 0 --force
          echo "Listing Conan remotes."
          conan remote list
      - name: Parse new version
        id: version
        shell: bash
        run: |
          echo version="$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" \
            | awk -F '"' '{print $2}')" | tee ${GITHUB_OUTPUT}
      - name: Try to authenticate to Conan remote
        id: remote
        shell: bash
        run: |
          # `conan user` implicitly uses the environment variables CONAN_LOGIN_USERNAME_<REMOTE> and CONAN_PASSWORD_<REMOTE>.
          # https://docs.conan.io/1/reference/commands/misc/user.html#using-environment-variables
          # https://docs.conan.io/1/reference/env_vars.html#conan-login-username-conan-login-username-remote-name
          # https://docs.conan.io/1/reference/env_vars.html#conan-password-conan-password-remote-name
          echo outcome=$(conan user --remote xrplf --password >&2 \
            && echo success || echo failure) | tee ${GITHUB_OUTPUT}
      - name: Upload new package
        id: upload
        if: (steps.remote.outputs.outcome == 'success')
        shell: bash
        run: |
          echo "conan upload version ${{ steps.version.outputs.version }} on channel ${{ steps.channel.outputs.channel }}"
          # FIX: upload to the remote configured above ('xrplf'), not 'ripple',
          # which is never added or authenticated in this workflow.
          echo outcome=$(conan upload xrpl/${{ steps.version.outputs.version }}@${{ steps.channel.outputs.channel }} --remote xrplf --confirm >&2 \
            && echo success || echo failure) | tee ${GITHUB_OUTPUT}
  notify_clio:
    name: Notify Clio
    runs-on: ubuntu-latest
    needs: publish
    env:
      GH_TOKEN: ${{ secrets.CLIO_NOTIFY_TOKEN }}
    steps:
      - name: Notify Clio about new version
        if: (needs.publish.outputs.outcome == 'success')
        shell: bash
        run: |
          gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \
            /repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \
            -F "client_payload[version]=${{ needs.publish.outputs.version }}@${{ needs.publish.outputs.channel }}" \
            -F "client_payload[pr]=${{ github.event.pull_request.number }}"

View File

@@ -50,35 +50,30 @@ jobs:
steps:
- name: checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- name: Delete old build tools installed using Homebrew
- name: install Conan
run: |
brew uninstall --force \
cmake \
conan
- name: Install build tools using Homebrew
brew install conan
- name: install Ninja
if: matrix.generator == 'Ninja'
run: brew install ninja
- name: install python
run: |
brew install --quiet \
ca-certificates \
ninja \
python@3.14
- name: Remove old fmt using Homebrew
if which python > /dev/null 2>&1; then
echo "Python executable exists"
else
brew install python@3.13
ln -s /opt/homebrew/bin/python3 /opt/homebrew/bin/python
fi
- name: install cmake
run: |
brew unlink fmt
brew cleanup
brew link fmt
- name: List software installed using Homebrew
run: brew list --version
- name: Install build tools using pip
shell: bash
if which cmake > /dev/null 2>&1; then
echo "cmake executable exists"
else
brew install cmake
fi
- name: install nproc
run: |
pip3 install --break-system-packages --upgrade pip
pip3 install --break-system-packages \
cmake==4.1.2 \
conan==2.22.1
brew install coreutils
- name: check environment
run: |
env | sort

View File

@@ -10,9 +10,6 @@ Loop: xrpld.app xrpld.core
Loop: xrpld.app xrpld.ledger
xrpld.app > xrpld.ledger
Loop: xrpld.app xrpld.net
xrpld.app > xrpld.net
Loop: xrpld.app xrpld.overlay
xrpld.overlay > xrpld.app
@@ -25,15 +22,9 @@ Loop: xrpld.app xrpld.rpc
Loop: xrpld.app xrpld.shamap
xrpld.app > xrpld.shamap
Loop: xrpld.core xrpld.net
xrpld.net > xrpld.core
Loop: xrpld.core xrpld.perflog
xrpld.perflog == xrpld.core
Loop: xrpld.net xrpld.rpc
xrpld.rpc ~= xrpld.net
Loop: xrpld.overlay xrpld.rpc
xrpld.rpc ~= xrpld.overlay

View File

@@ -2,6 +2,8 @@ libxrpl.basics > xrpl.basics
libxrpl.crypto > xrpl.basics
libxrpl.json > xrpl.basics
libxrpl.json > xrpl.json
libxrpl.net > xrpl.basics
libxrpl.net > xrpl.net
libxrpl.protocol > xrpl.basics
libxrpl.protocol > xrpl.json
libxrpl.protocol > xrpl.protocol
@@ -12,6 +14,9 @@ libxrpl.server > xrpl.basics
libxrpl.server > xrpl.json
libxrpl.server > xrpl.protocol
libxrpl.server > xrpl.server
libxrpl.telemetry > xrpl.basics
libxrpl.telemetry > xrpl.json
libxrpl.telemetry > xrpl.telemetry
test.app > test.jtx
test.app > test.rpc
test.app > test.toplevel
@@ -56,15 +61,16 @@ test.csf > xrpl.basics
test.csf > xrpld.consensus
test.csf > xrpl.json
test.csf > xrpl.protocol
test.csf > xrpl.telemetry
test.json > test.jtx
test.json > xrpl.json
test.jtx > xrpl.basics
test.jtx > xrpld.app
test.jtx > xrpld.core
test.jtx > xrpld.ledger
test.jtx > xrpld.net
test.jtx > xrpld.rpc
test.jtx > xrpl.json
test.jtx > xrpl.net
test.jtx > xrpl.protocol
test.jtx > xrpl.resource
test.jtx > xrpl.server
@@ -109,7 +115,6 @@ test.rpc > test.toplevel
test.rpc > xrpl.basics
test.rpc > xrpld.app
test.rpc > xrpld.core
test.rpc > xrpld.net
test.rpc > xrpld.overlay
test.rpc > xrpld.rpc
test.rpc > xrpl.json
@@ -133,15 +138,21 @@ test.toplevel > test.csf
test.toplevel > xrpl.json
test.unit_test > xrpl.basics
tests.libxrpl > xrpl.basics
tests.libxrpl > xrpl.json
tests.libxrpl > xrpl.telemetry
xrpl.json > xrpl.basics
xrpl.net > xrpl.basics
xrpl.protocol > xrpl.basics
xrpl.protocol > xrpl.json
xrpl.resource > xrpl.basics
xrpl.resource > xrpl.json
xrpl.resource > xrpl.protocol
xrpl.resource > xrpl.telemetry
xrpl.server > xrpl.basics
xrpl.server > xrpl.json
xrpl.server > xrpl.protocol
xrpl.server > xrpl.telemetry
xrpl.telemetry > xrpl.json
xrpld.app > test.unit_test
xrpld.app > xrpl.basics
xrpld.app > xrpld.conditions
@@ -149,8 +160,10 @@ xrpld.app > xrpld.consensus
xrpld.app > xrpld.nodestore
xrpld.app > xrpld.perflog
xrpld.app > xrpl.json
xrpld.app > xrpl.net
xrpld.app > xrpl.protocol
xrpld.app > xrpl.resource
xrpld.app > xrpl.telemetry
xrpld.conditions > xrpl.basics
xrpld.conditions > xrpl.protocol
xrpld.consensus > xrpl.basics
@@ -158,14 +171,12 @@ xrpld.consensus > xrpl.json
xrpld.consensus > xrpl.protocol
xrpld.core > xrpl.basics
xrpld.core > xrpl.json
xrpld.core > xrpl.net
xrpld.core > xrpl.protocol
xrpld.core > xrpl.telemetry
xrpld.ledger > xrpl.basics
xrpld.ledger > xrpl.json
xrpld.ledger > xrpl.protocol
xrpld.net > xrpl.basics
xrpld.net > xrpl.json
xrpld.net > xrpl.protocol
xrpld.net > xrpl.resource
xrpld.nodestore > xrpl.basics
xrpld.nodestore > xrpld.core
xrpld.nodestore > xrpld.unity
@@ -189,6 +200,7 @@ xrpld.rpc > xrpld.core
xrpld.rpc > xrpld.ledger
xrpld.rpc > xrpld.nodestore
xrpld.rpc > xrpl.json
xrpld.rpc > xrpl.net
xrpld.rpc > xrpl.protocol
xrpld.rpc > xrpl.resource
xrpld.rpc > xrpl.server

View File

@@ -16,13 +16,16 @@ set(CMAKE_CXX_EXTENSIONS OFF)
target_compile_definitions (common
INTERFACE
$<$<CONFIG:Debug>:DEBUG _DEBUG>
$<$<AND:$<BOOL:${profile}>,$<NOT:$<BOOL:${assert}>>>:NDEBUG>)
# ^^^^ NOTE: CMAKE release builds already have NDEBUG
# defined, so no need to add it explicitly except for
# this special case of (profile ON) and (assert OFF)
# -- presumably this is because we don't want profile
# builds asserting unless asserts were specifically
# requested
#[===[
NOTE: CMAKE release builds already have NDEBUG defined, so no need to add it
explicitly except for the special case of (profile ON) and (assert OFF).
Presumably this is because we don't want profile builds asserting unless
asserts were specifically requested.
]===]
$<$<AND:$<BOOL:${profile}>,$<NOT:$<BOOL:${assert}>>>:NDEBUG>
# TODO: Remove once we have migrated functions from OpenSSL 1.x to 3.x.
OPENSSL_SUPPRESS_DEPRECATED
)
if (MSVC)
# remove existing exception flag since we set it to -EHa

View File

@@ -51,6 +51,8 @@ target_link_libraries(xrpl.libpb
# TODO: Clean up the number of library targets later.
add_library(xrpl.imports.main INTERFACE)
find_package(RapidJSON)
target_link_libraries(xrpl.imports.main
INTERFACE
LibArchive::LibArchive
@@ -75,6 +77,7 @@ add_module(xrpl beast)
target_link_libraries(xrpl.libxrpl.beast PUBLIC
xrpl.imports.main
xrpl.libpb
rapidjson
)
# Level 02
@@ -85,6 +88,7 @@ target_link_libraries(xrpl.libxrpl.basics PUBLIC xrpl.libxrpl.beast)
add_module(xrpl json)
target_link_libraries(xrpl.libxrpl.json PUBLIC xrpl.libxrpl.basics)
add_module(xrpl crypto)
target_link_libraries(xrpl.libxrpl.crypto PUBLIC xrpl.libxrpl.basics)
@@ -99,6 +103,15 @@ target_link_libraries(xrpl.libxrpl.protocol PUBLIC
add_module(xrpl resource)
target_link_libraries(xrpl.libxrpl.resource PUBLIC xrpl.libxrpl.protocol)
# Level 06
add_module(xrpl net)
target_link_libraries(xrpl.libxrpl.net PUBLIC
xrpl.libxrpl.basics
xrpl.libxrpl.json
xrpl.libxrpl.protocol
xrpl.libxrpl.resource
)
add_module(xrpl server)
target_link_libraries(xrpl.libxrpl.server PUBLIC xrpl.libxrpl.protocol)
@@ -121,6 +134,7 @@ target_link_modules(xrpl PUBLIC
protocol
resource
server
net
)
# All headers in libxrpl are in modules.

View File

@@ -19,6 +19,7 @@ install (
xrpl.libxrpl.protocol
xrpl.libxrpl.resource
xrpl.libxrpl.server
xrpl.libxrpl.net
xrpl.libxrpl
antithesis-sdk-cpp
EXPORT RippleExports

View File

@@ -26,9 +26,6 @@ tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
{% if compiler == "apple-clang" and compiler_version >= 17 %}
tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
{% endif %}
{% if compiler == "clang" and compiler_version == 16 %}
tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS']
{% endif %}
{% if compiler == "gcc" and compiler_version < 13 %}
tools.build:cxxflags=['-Wno-restrict']
{% endif %}

View File

@@ -27,9 +27,10 @@ class Xrpl(ConanFile):
'grpc/1.50.1',
'libarchive/3.8.1',
'nudb/2.0.9',
'openssl/1.1.1w',
'openssl/3.5.2',
'soci/4.0.3',
'zlib/1.3.1',
"rapidjson/1.1.0"
]
test_requires = [
@@ -104,7 +105,7 @@ class Xrpl(ConanFile):
def requirements(self):
# Conan 2 requires transitive headers to be specified
transitive_headers_opt = {'transitive_headers': True} if conan_version.split('.')[0] == '2' else {}
self.requires('boost/1.83.0', force=True, **transitive_headers_opt)
self.requires('boost/1.86.0', force=True, **transitive_headers_opt)
self.requires('date/3.0.4', **transitive_headers_opt)
self.requires('lz4/1.10.0', force=True)
self.requires('protobuf/3.21.12', force=True)

View File

@@ -187,7 +187,10 @@ public:
operator[](std::string const& name);
beast::Journal
journal(std::string const& name);
journal(
std::string const& name,
std::optional<beast::Journal::JsonLogAttributes> attributes =
std::nullopt);
beast::severities::Severity
threshold() const;
@@ -237,19 +240,19 @@ public:
static LogSeverity
fromString(std::string const& s);
private:
enum {
// Maximum line length for log messages.
// If the message exceeds this length it will be truncated with ellipses.
maximumMessageCharacters = 12 * 1024
};
static void
format(
std::string& output,
std::string const& message,
beast::severities::Severity severity,
std::string const& partition);
private:
enum {
// Maximum line length for log messages.
// If the message exceeds this length it will be truncated with ellipses.
maximumMessageCharacters = 12 * 1024
};
};
// Wraps a Journal::Stream to skip evaluation of

View File

@@ -21,7 +21,6 @@
#define RIPPLE_BASICS_SHAMAP_HASH_H_INCLUDED
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/partitioned_unordered_map.h>
#include <ostream>

View File

@@ -90,9 +90,6 @@ public:
int
getCacheSize() const;
int
getTrackSize() const;
float
getHitRate();
@@ -170,9 +167,6 @@ public:
bool
retrieve(key_type const& key, T& data);
mutex_type&
peekMutex();
std::vector<key_type>
getKeys() const;
@@ -193,11 +187,14 @@ public:
private:
SharedPointerType
initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l);
initialFetch(key_type const& key);
void
collect_metrics();
Mutex&
lockPartition(key_type const& key) const;
private:
struct Stats
{
@@ -300,8 +297,8 @@ private:
[[maybe_unused]] clock_type::time_point const& now,
typename KeyValueCacheType::map_type& partition,
SweptPointersVector& stuffToSweep,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&);
std::atomic<int>& allRemoval,
Mutex& partitionLock);
[[nodiscard]] std::thread
sweepHelper(
@@ -310,14 +307,12 @@ private:
typename KeyOnlyCacheType::map_type& partition,
SweptPointersVector&,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&);
Mutex& partitionLock);
beast::Journal m_journal;
clock_type& m_clock;
Stats m_stats;
mutex_type mutable m_mutex;
// Used for logging
std::string m_name;
@@ -328,10 +323,11 @@ private:
clock_type::duration const m_target_age;
// Number of items cached
int m_cache_count;
std::atomic<int> m_cache_count;
cache_type m_cache; // Hold strong reference to recent objects
std::uint64_t m_hits;
std::uint64_t m_misses;
std::atomic<std::uint64_t> m_hits;
std::atomic<std::uint64_t> m_misses;
mutable std::vector<mutex_type> partitionLocks_;
};
} // namespace ripple

View File

@@ -22,6 +22,7 @@
#include <xrpl/basics/IntrusivePointer.ipp>
#include <xrpl/basics/TaggedCache.h>
#include <xrpl/beast/core/CurrentThreadName.h>
namespace ripple {
@@ -60,6 +61,7 @@ inline TaggedCache<
, m_hits(0)
, m_misses(0)
{
partitionLocks_ = std::vector<mutex_type>(m_cache.partitions());
}
template <
@@ -105,8 +107,13 @@ TaggedCache<
KeyEqual,
Mutex>::size() const
{
std::lock_guard lock(m_mutex);
return m_cache.size();
std::size_t totalSize = 0;
for (size_t i = 0; i < partitionLocks_.size(); ++i)
{
std::lock_guard<Mutex> lock(partitionLocks_[i]);
totalSize += m_cache.map()[i].size();
}
return totalSize;
}
template <
@@ -129,32 +136,7 @@ TaggedCache<
KeyEqual,
Mutex>::getCacheSize() const
{
std::lock_guard lock(m_mutex);
return m_cache_count;
}
template <
class Key,
class T,
bool IsKeyCache,
class SharedWeakUnionPointer,
class SharedPointerType,
class Hash,
class KeyEqual,
class Mutex>
inline int
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::getTrackSize() const
{
std::lock_guard lock(m_mutex);
return m_cache.size();
return m_cache_count.load(std::memory_order_relaxed);
}
template <
@@ -177,9 +159,10 @@ TaggedCache<
KeyEqual,
Mutex>::getHitRate()
{
std::lock_guard lock(m_mutex);
auto const total = static_cast<float>(m_hits + m_misses);
return m_hits * (100.0f / std::max(1.0f, total));
auto const hits = m_hits.load(std::memory_order_relaxed);
auto const misses = m_misses.load(std::memory_order_relaxed);
float const total = float(hits + misses);
return hits * (100.0f / std::max(1.0f, total));
}
template <
@@ -202,9 +185,12 @@ TaggedCache<
KeyEqual,
Mutex>::clear()
{
std::lock_guard lock(m_mutex);
for (auto& mutex : partitionLocks_)
mutex.lock();
m_cache.clear();
m_cache_count = 0;
for (auto& mutex : partitionLocks_)
mutex.unlock();
m_cache_count.store(0, std::memory_order_relaxed);
}
template <
@@ -227,11 +213,9 @@ TaggedCache<
KeyEqual,
Mutex>::reset()
{
std::lock_guard lock(m_mutex);
m_cache.clear();
m_cache_count = 0;
m_hits = 0;
m_misses = 0;
clear();
m_hits.store(0, std::memory_order_relaxed);
m_misses.store(0, std::memory_order_relaxed);
}
template <
@@ -255,7 +239,7 @@ TaggedCache<
KeyEqual,
Mutex>::touch_if_exists(KeyComparable const& key)
{
std::lock_guard lock(m_mutex);
std::lock_guard<Mutex> lock(lockPartition(key));
auto const iter(m_cache.find(key));
if (iter == m_cache.end())
{
@@ -297,8 +281,6 @@ TaggedCache<
auto const start = std::chrono::steady_clock::now();
{
std::lock_guard lock(m_mutex);
if (m_target_size == 0 ||
(static_cast<int>(m_cache.size()) <= m_target_size))
{
@@ -330,12 +312,13 @@ TaggedCache<
m_cache.map()[p],
allStuffToSweep[p],
allRemovals,
lock));
partitionLocks_[p]));
}
for (std::thread& worker : workers)
worker.join();
m_cache_count -= allRemovals;
int removals = allRemovals.load(std::memory_order_relaxed);
m_cache_count.fetch_sub(removals, std::memory_order_relaxed);
}
// At this point allStuffToSweep will go out of scope outside the lock
// and decrement the reference count on each strong pointer.
@@ -369,7 +352,8 @@ TaggedCache<
{
// Remove from cache, if !valid, remove from map too. Returns true if
// removed from cache
std::lock_guard lock(m_mutex);
std::lock_guard<Mutex> lock(lockPartition(key));
auto cit = m_cache.find(key);
@@ -382,7 +366,7 @@ TaggedCache<
if (entry.isCached())
{
--m_cache_count;
m_cache_count.fetch_sub(1, std::memory_order_relaxed);
entry.ptr.convertToWeak();
ret = true;
}
@@ -420,17 +404,16 @@ TaggedCache<
{
// Return canonical value, store if needed, refresh in cache
// Return values: true=we had the data already
std::lock_guard lock(m_mutex);
std::lock_guard<Mutex> lock(lockPartition(key));
auto cit = m_cache.find(key);
if (cit == m_cache.end())
{
m_cache.emplace(
std::piecewise_construct,
std::forward_as_tuple(key),
std::forward_as_tuple(m_clock.now(), data));
++m_cache_count;
m_cache_count.fetch_add(1, std::memory_order_relaxed);
return false;
}
@@ -479,12 +462,12 @@ TaggedCache<
data = cachedData;
}
++m_cache_count;
m_cache_count.fetch_add(1, std::memory_order_relaxed);
return true;
}
entry.ptr = data;
++m_cache_count;
m_cache_count.fetch_add(1, std::memory_order_relaxed);
return false;
}
@@ -560,10 +543,11 @@ TaggedCache<
KeyEqual,
Mutex>::fetch(key_type const& key)
{
std::lock_guard<mutex_type> l(m_mutex);
auto ret = initialFetch(key, l);
std::lock_guard<Mutex> lock(lockPartition(key));
auto ret = initialFetch(key);
if (!ret)
++m_misses;
m_misses.fetch_add(1, std::memory_order_relaxed);
return ret;
}
@@ -627,8 +611,8 @@ TaggedCache<
Mutex>::insert(key_type const& key)
-> std::enable_if_t<IsKeyCache, ReturnType>
{
std::lock_guard lock(m_mutex);
clock_type::time_point const now(m_clock.now());
std::lock_guard<Mutex> lock(lockPartition(key));
auto [it, inserted] = m_cache.emplace(
std::piecewise_construct,
std::forward_as_tuple(key),
@@ -668,29 +652,6 @@ TaggedCache<
return true;
}
template <
class Key,
class T,
bool IsKeyCache,
class SharedWeakUnionPointer,
class SharedPointerType,
class Hash,
class KeyEqual,
class Mutex>
inline auto
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::peekMutex() -> mutex_type&
{
return m_mutex;
}
template <
class Key,
class T,
@@ -714,10 +675,13 @@ TaggedCache<
std::vector<key_type> v;
{
std::lock_guard lock(m_mutex);
v.reserve(m_cache.size());
for (auto const& _ : m_cache)
v.push_back(_.first);
for (std::size_t i = 0; i < partitionLocks_.size(); ++i)
{
std::lock_guard<Mutex> lock(partitionLocks_[i]);
for (auto const& entry : m_cache.map()[i])
v.push_back(entry.first);
}
}
return v;
@@ -743,11 +707,12 @@ TaggedCache<
KeyEqual,
Mutex>::rate() const
{
std::lock_guard lock(m_mutex);
auto const tot = m_hits + m_misses;
auto const hits = m_hits.load(std::memory_order_relaxed);
auto const misses = m_misses.load(std::memory_order_relaxed);
auto const tot = hits + misses;
if (tot == 0)
return 0;
return double(m_hits) / tot;
return 0.0;
return double(hits) / tot;
}
template <
@@ -771,18 +736,16 @@ TaggedCache<
KeyEqual,
Mutex>::fetch(key_type const& digest, Handler const& h)
{
{
std::lock_guard l(m_mutex);
if (auto ret = initialFetch(digest, l))
return ret;
}
std::lock_guard<Mutex> lock(lockPartition(digest));
if (auto ret = initialFetch(digest))
return ret;
auto sle = h();
if (!sle)
return {};
std::lock_guard l(m_mutex);
++m_misses;
m_misses.fetch_add(1, std::memory_order_relaxed);
auto const [it, inserted] =
m_cache.emplace(digest, Entry(m_clock.now(), std::move(sle)));
if (!inserted)
@@ -809,9 +772,10 @@ TaggedCache<
SharedPointerType,
Hash,
KeyEqual,
Mutex>::
initialFetch(key_type const& key, std::lock_guard<mutex_type> const& l)
Mutex>::initialFetch(key_type const& key)
{
std::lock_guard<Mutex> lock(lockPartition(key));
auto cit = m_cache.find(key);
if (cit == m_cache.end())
return {};
@@ -819,7 +783,7 @@ TaggedCache<
Entry& entry = cit->second;
if (entry.isCached())
{
++m_hits;
m_hits.fetch_add(1, std::memory_order_relaxed);
entry.touch(m_clock.now());
return entry.ptr.getStrong();
}
@@ -827,12 +791,13 @@ TaggedCache<
if (entry.isCached())
{
// independent of cache size, so not counted as a hit
++m_cache_count;
m_cache_count.fetch_add(1, std::memory_order_relaxed);
entry.touch(m_clock.now());
return entry.ptr.getStrong();
}
m_cache.erase(cit);
return {};
}
@@ -861,10 +826,11 @@ TaggedCache<
{
beast::insight::Gauge::value_type hit_rate(0);
{
std::lock_guard lock(m_mutex);
auto const total(m_hits + m_misses);
auto const hits = m_hits.load(std::memory_order_relaxed);
auto const misses = m_misses.load(std::memory_order_relaxed);
auto const total = hits + misses;
if (total != 0)
hit_rate = (m_hits * 100) / total;
hit_rate = (hits * 100) / total;
}
m_stats.hit_rate.set(hit_rate);
}
@@ -895,12 +861,16 @@ TaggedCache<
typename KeyValueCacheType::map_type& partition,
SweptPointersVector& stuffToSweep,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&)
Mutex& partitionLock)
{
return std::thread([&, this]() {
beast::setCurrentThreadName("sweep-KVCache");
int cacheRemovals = 0;
int mapRemovals = 0;
std::lock_guard<Mutex> lock(partitionLock);
// Keep references to all the stuff we sweep
// so that we can destroy them outside the lock.
stuffToSweep.reserve(partition.size());
@@ -984,12 +954,16 @@ TaggedCache<
typename KeyOnlyCacheType::map_type& partition,
SweptPointersVector&,
std::atomic<int>& allRemovals,
std::lock_guard<std::recursive_mutex> const&)
Mutex& partitionLock)
{
return std::thread([&, this]() {
beast::setCurrentThreadName("sweep-KCache");
int cacheRemovals = 0;
int mapRemovals = 0;
std::lock_guard<Mutex> lock(partitionLock);
// Keep references to all the stuff we sweep
// so that we can destroy them outside the lock.
{
@@ -1024,6 +998,29 @@ TaggedCache<
});
}
template <
class Key,
class T,
bool IsKeyCache,
class SharedWeakUnionPointer,
class SharedPointerType,
class Hash,
class KeyEqual,
class Mutex>
inline Mutex&
TaggedCache<
Key,
T,
IsKeyCache,
SharedWeakUnionPointer,
SharedPointerType,
Hash,
KeyEqual,
Mutex>::lockPartition(key_type const& key) const
{
return partitionLocks_[m_cache.partition_index(key)];
}
} // namespace ripple
#endif

View File

@@ -277,6 +277,12 @@ public:
return map_;
}
partition_map_type const&
map() const
{
return map_;
}
iterator
begin()
{
@@ -321,6 +327,12 @@ public:
return cend();
}
std::size_t
partition_index(key_type const& key) const
{
return partitioner(key);
}
private:
template <class T>
void

View File

@@ -22,7 +22,61 @@
#include <xrpl/beast/utility/instrumentation.h>
#include <rapidjson/document.h>
#include <deque>
#include <optional>
#include <source_location>
#include <sstream>
#include <utility>
namespace ripple::log {
template <typename T>
class LogParameter
{
public:
template <typename TArg>
LogParameter(char const* name, TArg&& value)
: name_(name), value_(std::forward<TArg>(value))
{
}
private:
char const* name_;
T value_;
template <typename U>
friend std::ostream&
operator<<(std::ostream& os, LogParameter<U> const&);
};
template <typename T>
class LogField
{
public:
template <typename TArg>
LogField(char const* name, TArg&& value)
: name_(name), value_(std::forward<TArg>(value))
{
}
private:
char const* name_;
T value_;
template <typename U>
friend std::ostream&
operator<<(std::ostream& os, LogField<U> const&);
};
template <typename T>
std::ostream&
operator<<(std::ostream& os, LogField<T> const& param);
template <typename T>
std::ostream&
operator<<(std::ostream& os, LogParameter<T> const& param);
} // namespace ripple::log
namespace beast {
@@ -42,6 +96,9 @@ enum Severity {
kDisabled,
kNone = kDisabled
};
std::string
to_string(Severity severity);
} // namespace severities
/** A generic endpoint for log messages.
@@ -59,18 +116,115 @@ enum Severity {
class Journal
{
public:
template <typename T>
friend std::ostream&
ripple::log::operator<<(
std::ostream& os,
ripple::log::LogField<T> const& param);
template <typename T>
friend std::ostream&
ripple::log::operator<<(
std::ostream& os,
ripple::log::LogParameter<T> const& param);
class Sink;
class JsonLogAttributes
{
public:
using AttributeFields = rapidjson::Value;
JsonLogAttributes();
JsonLogAttributes(JsonLogAttributes const& other);
JsonLogAttributes&
operator=(JsonLogAttributes const& other);
void
setModuleName(std::string const& name);
[[nodiscard]] static JsonLogAttributes
combine(AttributeFields const& a, AttributeFields const& b);
AttributeFields&
contextValues()
{
return contextValues_;
}
[[nodiscard]] AttributeFields const&
contextValues() const
{
return contextValues_;
}
rapidjson::MemoryPoolAllocator<>&
allocator()
{
return allocator_;
}
private:
AttributeFields contextValues_;
rapidjson::MemoryPoolAllocator<> allocator_;
friend class Journal;
};
struct JsonLogContext
{
std::source_location location = {};
rapidjson::Value messageParams;
rapidjson::MemoryPoolAllocator<> allocator;
JsonLogContext() = default;
void
reset(std::source_location location_) noexcept
{
location = location_;
messageParams = rapidjson::Value{};
messageParams.SetObject();
allocator.Clear();
}
};
private:
// Severity level / threshold of a Journal message.
using Severity = severities::Severity;
std::optional<JsonLogAttributes> m_attributes;
static std::optional<JsonLogAttributes> globalLogAttributes_;
static std::mutex globalLogAttributesMutex_;
static bool m_jsonLogsEnabled;
static thread_local JsonLogContext currentJsonLogContext_;
// Invariant: m_sink always points to a valid Sink
Sink* m_sink;
Sink* m_sink = nullptr;
static void
initMessageContext(std::source_location location);
static std::string
formatLog(
std::string const& message,
severities::Severity severity,
std::optional<JsonLogAttributes> const& attributes = std::nullopt);
public:
//--------------------------------------------------------------------------
static void
enableStructuredJournal();
static void
disableStructuredJournal();
static bool
isStructuredJournalEnabled();
/** Abstraction for the underlying message destination. */
class Sink
{
@@ -150,16 +304,25 @@ public:
{
public:
ScopedStream(ScopedStream const& other)
: ScopedStream(other.m_sink, other.m_level)
: ScopedStream(other.m_attributes, other.m_sink, other.m_level)
{
}
ScopedStream(Sink& sink, Severity level);
ScopedStream(
std::optional<JsonLogAttributes> attributes,
Sink& sink,
Severity level);
template <typename T>
ScopedStream(Stream const& stream, T const& t);
ScopedStream(
std::optional<JsonLogAttributes> attributes,
Stream const& stream,
T const& t);
ScopedStream(Stream const& stream, std::ostream& manip(std::ostream&));
ScopedStream(
std::optional<JsonLogAttributes> attributes,
Stream const& stream,
std::ostream& manip(std::ostream&));
ScopedStream&
operator=(ScopedStream const&) = delete;
@@ -180,6 +343,7 @@ public:
operator<<(T const& t) const;
private:
std::optional<JsonLogAttributes> m_attributes;
Sink& m_sink;
Severity const m_level;
std::ostringstream mutable m_ostream;
@@ -214,7 +378,11 @@ public:
Constructor is inlined so checking active() very inexpensive.
*/
Stream(Sink& sink, Severity level) : m_sink(sink), m_level(level)
Stream(
std::optional<JsonLogAttributes> attributes,
Sink& sink,
Severity level)
: m_attributes(std::move(attributes)), m_sink(sink), m_level(level)
{
XRPL_ASSERT(
m_level < severities::kDisabled,
@@ -222,7 +390,8 @@ public:
}
/** Construct or copy another Stream. */
Stream(Stream const& other) : Stream(other.m_sink, other.m_level)
Stream(Stream const& other)
: Stream(other.m_attributes, other.m_sink, other.m_level)
{
}
@@ -269,6 +438,7 @@ public:
/** @} */
private:
std::optional<JsonLogAttributes> m_attributes;
Sink& m_sink;
Severity m_level;
};
@@ -287,9 +457,54 @@ public:
/** Journal has no default constructor. */
Journal() = delete;
/** Create a journal that writes to the specified sink. */
explicit Journal(Sink& sink) : m_sink(&sink)
/** Copy a journal, optionally layering extra JSON attributes on top.

    The new journal writes to the same sink as `other`. If `attributes`
    is supplied, those fields are taken first; any attributes already on
    `other` are then merged in, with the explicitly supplied `attributes`
    taking precedence on key collisions (combine's second argument wins).
*/
Journal(
    Journal const& other,
    std::optional<JsonLogAttributes> attributes = std::nullopt)
    : m_sink(other.m_sink)
{
    // Adopt the caller-supplied attributes (if any) first.
    if (attributes.has_value())
        m_attributes = std::move(attributes.value());
    if (other.m_attributes.has_value())
    {
        if (m_attributes.has_value())
            // other's attributes form the base; the caller-supplied
            // ones (now in m_attributes) override duplicates.
            m_attributes = JsonLogAttributes::combine(
                other.m_attributes->contextValues_,
                m_attributes->contextValues_);
        else
            m_attributes = other.m_attributes;
    }
}
/** Create a journal that writes to the specified sink.

    @param sink The destination for messages written to this journal.
    @param name Module name recorded as the "Module" attribute of every
                structured log line.
    @param attributes Optional initial JSON attributes for this journal.

    NOTE(review): when `attributes` is nullopt the `name` is silently
    dropped — no "Module" attribute is ever recorded. Confirm this is
    intentional rather than requiring callers to pass empty attributes
    just to get a module name.
*/
explicit Journal(
    Sink& sink,
    std::string const& name = {},
    std::optional<JsonLogAttributes> attributes = std::nullopt)
    : m_sink(&sink)
{
    if (attributes)
    {
        m_attributes = std::move(attributes);
        m_attributes->setModuleName(name);
    }
}
/** Copy assignment: share the other journal's sink and copy its
    attributes. Self-assignment is a no-op. */
Journal&
operator=(Journal const& other)
{
    if (&other == this)
        return *this;
    m_sink = other.m_sink;
    m_attributes = other.m_attributes;
    return *this;
}
/** Move assignment: take over the other journal's sink pointer and
    attributes.

    NOTE(review): declared noexcept — confirm JsonLogAttributes's move
    assignment (which holds a rapidjson document) truly cannot throw.
    There is also no self-move guard; a self-move would leave
    m_attributes in a valid-but-unspecified state — confirm callers
    never self-move.
*/
Journal&
operator=(Journal&& other) noexcept
{
    m_sink = other.m_sink;
    m_attributes = std::move(other.m_attributes);
    return *this;
}
/** Returns the Sink associated with this Journal. */
@@ -303,7 +518,7 @@ public:
Stream
stream(Severity level) const
{
return Stream(*m_sink, level);
return Stream(m_attributes, *m_sink, level);
}
/** Returns `true` if any message would be logged at this severity level.
@@ -319,41 +534,75 @@ public:
/** Severity stream access functions. */
/** @{ */
Stream
trace() const
trace(std::source_location location = std::source_location::current()) const
{
return {*m_sink, severities::kTrace};
if (m_jsonLogsEnabled)
initMessageContext(location);
return {m_attributes, *m_sink, severities::kTrace};
}
Stream
debug() const
debug(std::source_location location = std::source_location::current()) const
{
return {*m_sink, severities::kDebug};
if (m_jsonLogsEnabled)
initMessageContext(location);
return {m_attributes, *m_sink, severities::kDebug};
}
Stream
info() const
info(std::source_location location = std::source_location::current()) const
{
return {*m_sink, severities::kInfo};
if (m_jsonLogsEnabled)
initMessageContext(location);
return {m_attributes, *m_sink, severities::kInfo};
}
/** Severity stream for warnings.

    @param location Call-site source location, captured automatically;
                    recorded in the structured-log message context when
                    JSON logging is enabled.
*/
Stream
warn(std::source_location location = std::source_location::current()) const
{
    // Removed leftover debug scaffolding that constructed an unused
    // rapidjson::Value ("char const* a = ...") — it had no effect on
    // the returned stream and did not appear in any sibling accessor.
    if (m_jsonLogsEnabled)
        initMessageContext(location);
    return {m_attributes, *m_sink, severities::kWarning};
}
Stream
error() const
error(std::source_location location = std::source_location::current()) const
{
return {*m_sink, severities::kError};
if (m_jsonLogsEnabled)
initMessageContext(location);
return {m_attributes, *m_sink, severities::kError};
}
Stream
fatal() const
fatal(std::source_location location = std::source_location::current()) const
{
return {*m_sink, severities::kFatal};
if (m_jsonLogsEnabled)
initMessageContext(location);
return {m_attributes, *m_sink, severities::kFatal};
}
/** @} */
/** Discard all process-wide structured-log attributes.
    Thread-safe: takes globalLogAttributesMutex_. */
static void
resetGlobalAttributes()
{
    std::lock_guard lock(globalLogAttributesMutex_);
    globalLogAttributes_ = std::nullopt;
}
/** Merge attributes into the process-wide structured-log attribute set.

    Keys in `globalLogAttributes` override any existing global keys
    (combine's second argument wins). Thread-safe: takes
    globalLogAttributesMutex_.

    @param globalLogAttributes Attributes to merge in (taken by value;
                               moved when the global set is empty).
*/
static void
addGlobalAttributes(JsonLogAttributes globalLogAttributes)
{
    std::lock_guard lock(globalLogAttributesMutex_);
    if (!globalLogAttributes_)
    {
        // First call: no merge needed — adopt the argument directly
        // instead of combining it with a freshly-created empty object.
        globalLogAttributes_ = std::move(globalLogAttributes);
        return;
    }
    globalLogAttributes_ = JsonLogAttributes::combine(
        globalLogAttributes_->contextValues(),
        globalLogAttributes.contextValues());
}
};
#ifndef __INTELLISENSE__
@@ -368,8 +617,11 @@ static_assert(std::is_nothrow_destructible<Journal>::value == true, "");
//------------------------------------------------------------------------------
template <typename T>
Journal::ScopedStream::ScopedStream(Journal::Stream const& stream, T const& t)
: ScopedStream(stream.sink(), stream.level())
Journal::ScopedStream::ScopedStream(
std::optional<JsonLogAttributes> attributes,
Stream const& stream,
T const& t)
: ScopedStream(std::move(attributes), stream.sink(), stream.level())
{
m_ostream << t;
}
@@ -388,7 +640,7 @@ template <typename T>
Journal::ScopedStream
Journal::Stream::operator<<(T const& t) const
{
return ScopedStream(*this, t);
return {m_attributes, *this, t};
}
namespace detail {
@@ -460,4 +712,133 @@ using logwstream = basic_logstream<wchar_t>;
} // namespace beast
namespace ripple::log {
namespace detail {
/** Store `value` under `name` in a rapidjson object, optionally echoing
    the value's textual form to a stream.

    Conversion strategy, tried in order at compile time:
      1. rapidjson::Value has a (ValueType, allocator) constructor —
         e.g. string-like types that must be copied into the pool;
      2. rapidjson::Value has a plain (ValueType) constructor —
         e.g. numeric and bool types that need no allocation;
      3. ValueType is std::string — copied via c_str() and the allocator;
      4. anything else — stream the value through operator<< into an
         ostringstream and store the resulting text.

    @param object    Destination JSON object (member is appended; an
                     existing member with the same name is NOT replaced).
    @param allocator Pool allocator owning any copied string data.
    @param name      Member name; stored via StringRef, so the pointed-to
                     characters must outlive `object`.
    @param value     Value to record.
    @param outStream If non-null, the value's text is also written here.
*/
template <typename T>
void
setJsonValue(
    rapidjson::Value& object,
    rapidjson::MemoryPoolAllocator<>& allocator,
    char const* name,
    T&& value,
    std::ostream* outStream)
{
    using ValueType = std::decay_t<T>;
    rapidjson::Value jsonValue;
    if constexpr (std::constructible_from<
                      rapidjson::Value,
                      ValueType,
                      rapidjson::MemoryPoolAllocator<>&>)
    {
        jsonValue = rapidjson::Value{value, allocator};
        if (outStream)
        {
            (*outStream) << value;
        }
    }
    else if constexpr (std::constructible_from<rapidjson::Value, ValueType>)
    {
        jsonValue = rapidjson::Value{value};
        if (outStream)
        {
            (*outStream) << value;
        }
    }
    else if constexpr (std::same_as<ValueType, std::string>)
    {
        jsonValue = rapidjson::Value{value.c_str(), allocator};
        if (outStream)
        {
            (*outStream) << value;
        }
    }
    else
    {
        // Fallback: any type with a stream-insertion operator is stored
        // as its formatted text.
        std::ostringstream oss;
        oss << value;
        jsonValue = rapidjson::Value{oss.str().c_str(), allocator};
        if (outStream)
        {
            (*outStream) << oss.str();
        }
    }
    object.AddMember(
        rapidjson::StringRef(name), std::move(jsonValue), allocator);
}
} // namespace detail
/** Stream a named log parameter.

    When structured (JSON) logging is enabled, the value is recorded in
    the thread-local message-parameter object AND echoed into the text
    stream. When disabled, nothing is written at all.
*/
template <typename T>
std::ostream&
operator<<(std::ostream& os, LogParameter<T> const& param)
{
    if (beast::Journal::m_jsonLogsEnabled)
    {
        detail::setJsonValue(
            beast::Journal::currentJsonLogContext_.messageParams,
            beast::Journal::currentJsonLogContext_.allocator,
            param.name_,
            param.value_,
            &os);
    }
    return os;
}
/** Stream a named log field.

    Like LogParameter, but the value is recorded only in the JSON
    message-parameter object — it is never echoed to the text stream
    (null output stream passed to setJsonValue). No-op when structured
    logging is disabled.
*/
template <typename T>
std::ostream&
operator<<(std::ostream& os, LogField<T> const& param)
{
    if (beast::Journal::m_jsonLogsEnabled)
    {
        detail::setJsonValue(
            beast::Journal::currentJsonLogContext_.messageParams,
            beast::Journal::currentJsonLogContext_.allocator,
            param.name_,
            param.value_,
            nullptr);
    }
    return os;
}
/** Bundle a name/value pair that both appears in the text message and
    is captured as a structured-log parameter. */
template <typename T>
LogParameter<T>
param(char const* name, T&& value)
{
    return {name, std::forward<T>(value)};
}
/** Bundle a name/value pair captured ONLY as a structured-log field,
    without appearing in the plain text message. */
template <typename T>
LogField<T>
field(char const* name, T&& value)
{
    return {name, std::forward<T>(value)};
}
/** Build a JsonLogAttributes object from name/value pairs (see attr()).

    @param pairs std::pair<char const*, V> items; each becomes one JSON
                 member of the resulting attribute set.
    @return The populated attribute object.
*/
template <typename... Pair>
[[nodiscard]] beast::Journal::JsonLogAttributes
attributes(Pair&&... pairs)
{
    beast::Journal::JsonLogAttributes result;
    // Fold over the pairs, inserting each into the JSON object.
    // std::forward preserves rvalue-ness of each pair so its value can
    // be moved rather than copied into setJsonValue.
    (detail::setJsonValue(
         result.contextValues(),
         result.allocator(),
         pairs.first,
         std::forward<Pair>(pairs).second,
         nullptr),
     ...);
    return result;
}
/** Pair an attribute name with its (decayed) value, for use with
    attributes(). */
template <typename T>
[[nodiscard]] std::pair<char const*, std::decay_t<T>>
attr(char const* name, T&& value)
{
    return {name, std::forward<T>(value)};
}
} // namespace ripple::log
#endif

View File

@@ -20,9 +20,8 @@
#ifndef RIPPLE_NET_HTTPCLIENT_H_INCLUDED
#define RIPPLE_NET_HTTPCLIENT_H_INCLUDED
#include <xrpld/core/Config.h>
#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/beast/utility/Journal.h>
#include <boost/asio/io_service.hpp>
#include <boost/asio/streambuf.hpp>
@@ -44,7 +43,11 @@ public:
static constexpr auto maxClientHeaderBytes = kilobytes(32);
static void
initializeSSLContext(Config const& config, beast::Journal j);
initializeSSLContext(
std::string const& sslVerifyDir,
std::string const& sslVerifyFile,
bool sslVerify,
beast::Journal j);
static void
get(bool bSSL,

View File

@@ -20,11 +20,10 @@
#ifndef RIPPLE_NET_HTTPCLIENTSSLCONTEXT_H_INCLUDED
#define RIPPLE_NET_HTTPCLIENTSSLCONTEXT_H_INCLUDED
#include <xrpld/core/Config.h>
#include <xrpld/net/RegisterSSLCerts.h>
#include <xrpl/basics/Log.h>
#include <xrpl/basics/contract.h>
#include <xrpl/beast/utility/Journal.h>
#include <xrpl/net/RegisterSSLCerts.h>
#include <boost/asio.hpp>
#include <boost/asio/ip/tcp.hpp>
@@ -37,31 +36,33 @@ class HTTPClientSSLContext
{
public:
explicit HTTPClientSSLContext(
Config const& config,
std::string const& sslVerifyDir,
std::string const& sslVerifyFile,
bool sslVerify,
beast::Journal j,
boost::asio::ssl::context_base::method method =
boost::asio::ssl::context::sslv23)
: ssl_context_{method}, j_(j), verify_{config.SSL_VERIFY}
: ssl_context_{method}, j_(j), verify_{sslVerify}
{
boost::system::error_code ec;
if (config.SSL_VERIFY_FILE.empty())
if (sslVerifyFile.empty())
{
registerSSLCerts(ssl_context_, ec, j_);
if (ec && config.SSL_VERIFY_DIR.empty())
if (ec && sslVerifyDir.empty())
Throw<std::runtime_error>(boost::str(
boost::format("Failed to set_default_verify_paths: %s") %
ec.message()));
}
else
{
ssl_context_.load_verify_file(config.SSL_VERIFY_FILE);
ssl_context_.load_verify_file(sslVerifyFile);
}
if (!config.SSL_VERIFY_DIR.empty())
if (!sslVerifyDir.empty())
{
ssl_context_.add_verify_path(config.SSL_VERIFY_DIR, ec);
ssl_context_.add_verify_path(sslVerifyDir, ec);
if (ec)
Throw<std::runtime_error>(boost::str(

View File

@@ -22,7 +22,6 @@
#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/basics/partitioned_unordered_map.h>
#include <cstdint>
@@ -56,10 +55,7 @@ std::size_t constexpr oversizeMetaDataCap = 5200;
/** The maximum number of entries per directory page */
std::size_t constexpr dirNodeMaxEntries = 32;
/** The maximum number of pages allowed in a directory
Made obsolete by fixDirectoryLimit amendment.
*/
/** The maximum number of pages allowed in a directory */
std::uint64_t constexpr dirNodeMaxPages = 262144;
/** The maximum number of items in an NFT page */

View File

@@ -29,8 +29,9 @@
// Add new amendments to the top of this list.
// Keep it sorted in reverse chronological order.
// If you add an amendment here, then do not forget to increment `numFeatures`
// in include/xrpl/protocol/Feature.h.
XRPL_FIX (DirectoryLimit, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (PriceOracleOrder, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (MPTDeliveredAmount, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (AMMClawbackRounding, Supported::no, VoteBehavior::DefaultNo)
@@ -40,7 +41,7 @@ XRPL_FIX (AMMv1_3, Supported::yes, VoteBehavior::DefaultNo
XRPL_FEATURE(PermissionedDEX, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(Batch, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(SingleAssetVault, Supported::no, VoteBehavior::DefaultNo)
XRPL_FEATURE(PermissionDelegation, Supported::no, VoteBehavior::DefaultNo)
XRPL_FEATURE(PermissionDelegation, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (PayChanCancelAfter, Supported::yes, VoteBehavior::DefaultNo)
// Check flags in Credential transactions
XRPL_FIX (InvalidTxFlags, Supported::yes, VoteBehavior::DefaultNo)

View File

@@ -132,7 +132,8 @@ public:
}
}
JLOG(m_journal.debug()) << "New inbound endpoint " << *entry;
JLOG(m_journal.debug())
<< "New inbound endpoint " << log::param("Entry", *entry);
return Consumer(*this, *entry);
}
@@ -160,7 +161,8 @@ public:
}
}
JLOG(m_journal.debug()) << "New outbound endpoint " << *entry;
JLOG(m_journal.debug())
<< "New outbound endpoint " << log::param("Entry", *entry);
return Consumer(*this, *entry);
}
@@ -193,7 +195,8 @@ public:
}
}
JLOG(m_journal.debug()) << "New unlimited endpoint " << *entry;
JLOG(m_journal.debug())
<< "New unlimited endpoint " << log::param("Entry", *entry);
return Consumer(*this, *entry);
}
@@ -350,7 +353,8 @@ public:
{
if (iter->whenExpires <= elapsed)
{
JLOG(m_journal.debug()) << "Expired " << *iter;
JLOG(m_journal.debug())
<< "Expired " << log::param("Entry", *iter);
auto table_iter = table_.find(*iter->key);
++iter;
erase(table_iter);
@@ -422,7 +426,9 @@ public:
std::lock_guard _(lock_);
if (--entry.refcount == 0)
{
JLOG(m_journal.debug()) << "Inactive " << entry;
JLOG(m_journal.debug())
<< "Inactive " << log::param("Entry", entry);
;
switch (entry.key->kind)
{
@@ -474,7 +480,8 @@ public:
clock_type::time_point const now(m_clock.now());
int const balance(entry.add(fee.cost(), now));
JLOG(getStream(fee.cost(), m_journal))
<< "Charging " << entry << " for " << fee << context;
<< "Charging " << log::param("Entry", entry) << " for "
<< log::param("Fee", fee) << context;
return disposition(balance);
}
@@ -496,7 +503,9 @@ public:
}
if (notify)
{
JLOG(m_journal.info()) << "Load warning: " << entry;
JLOG(m_journal.info())
<< "Load warning: " << log::param("Entry", entry);
;
++m_stats.warn;
}
return notify;
@@ -515,8 +524,10 @@ public:
if (balance >= dropThreshold)
{
JLOG(m_journal.warn())
<< "Consumer entry " << entry << " dropped with balance "
<< balance << " at or above drop threshold " << dropThreshold;
<< "Consumer entry " << log::param("Entry", entry)
<< " dropped with balance " << log::param("Entry", balance)
<< " at or above drop threshold "
<< log::param("Entry", dropThreshold);
// Adding feeDrop at this point keeps the dropped connection
// from re-connecting for at least a little while after it is

View File

@@ -47,7 +47,6 @@ protected:
Port const& port_;
Handler& handler_;
endpoint_type remote_address_;
beast::WrappedSink sink_;
beast::Journal const j_;
boost::asio::executor_work_guard<boost::asio::executor> work_;
@@ -84,13 +83,13 @@ BasePeer<Handler, Impl>::BasePeer(
: port_(port)
, handler_(handler)
, remote_address_(remote_address)
, sink_(
journal.sink(),
[] {
static std::atomic<unsigned> id{0};
return "##" + std::to_string(++id) + " ";
}())
, j_(sink_)
, j_(journal,
log::attributes(log::attr(
"PeerID",
[] {
static std::atomic<unsigned> id{0};
return "##" + std::to_string(++id) + " ";
}())))
, work_(executor)
, strand_(executor)
{

View File

@@ -157,9 +157,11 @@ Logs::operator[](std::string const& name)
}
beast::Journal
Logs::journal(std::string const& name)
Logs::journal(
std::string const& name,
std::optional<beast::Journal::JsonLogAttributes> attributes)
{
return beast::Journal(get(name));
return beast::Journal{get(name), name, std::move(attributes)};
}
beast::severities::Severity
@@ -332,36 +334,39 @@ Logs::format(
{
output.reserve(message.size() + partition.size() + 100);
output = to_string(std::chrono::system_clock::now());
output += " ";
if (!partition.empty())
output += partition + ":";
using namespace beast::severities;
switch (severity)
if (!beast::Journal::isStructuredJournalEnabled())
{
case kTrace:
output += "TRC ";
break;
case kDebug:
output += "DBG ";
break;
case kInfo:
output += "NFO ";
break;
case kWarning:
output += "WRN ";
break;
case kError:
output += "ERR ";
break;
default:
UNREACHABLE("ripple::Logs::format : invalid severity");
[[fallthrough]];
case kFatal:
output += "FTL ";
break;
output = to_string(std::chrono::system_clock::now());
output += " ";
if (!partition.empty())
output += partition + ":";
using namespace beast::severities;
switch (severity)
{
case kTrace:
output += "TRC ";
break;
case kDebug:
output += "DBG ";
break;
case kInfo:
output += "NFO ";
break;
case kWarning:
output += "WRN ";
break;
case kError:
output += "ERR ";
break;
default:
UNREACHABLE("ripple::Logs::format : invalid severity");
[[fallthrough]];
case kFatal:
output += "FTL ";
break;
}
}
output += message;

View File

@@ -19,12 +19,23 @@
#include <xrpl/beast/utility/Journal.h>
#include <rapidjson/document.h>
#include <rapidjson/stringbuffer.h>
#include <rapidjson/writer.h>
#include <ios>
#include <ostream>
#include <ranges>
#include <string>
#include <thread>
namespace beast {
std::optional<Journal::JsonLogAttributes> Journal::globalLogAttributes_;
std::mutex Journal::globalLogAttributesMutex_;
bool Journal::m_jsonLogsEnabled = false;
thread_local Journal::JsonLogContext Journal::currentJsonLogContext_{};
//------------------------------------------------------------------------------
// A Sink that does nothing.
@@ -87,6 +98,214 @@ Journal::getNullSink()
//------------------------------------------------------------------------------
/** Return the lower-case textual name of a severity level.
    An out-of-range value triggers UNREACHABLE and yields "". */
std::string
severities::to_string(Severity severity)
{
    char const* name = "";
    switch (severity)
    {
        case kDisabled:
            name = "disabled";
            break;
        case kTrace:
            name = "trace";
            break;
        case kDebug:
            name = "debug";
            break;
        case kInfo:
            name = "info";
            break;
        case kWarning:
            name = "warning";
            break;
        case kError:
            name = "error";
            break;
        case kFatal:
            name = "fatal";
            break;
        default:
            UNREACHABLE("Unexpected severity value!");
    }
    return name;
}
// Default-construct with an empty JSON object (rapidjson values start
// as Null; callers expect an Object they can AddMember into).
Journal::JsonLogAttributes::JsonLogAttributes()
{
    contextValues_.SetObject();
}
// Copy constructor: deep-copy the other object's JSON members into this
// instance's own allocator pool (rapidjson values are not copyable
// without an explicit allocator).
Journal::JsonLogAttributes::JsonLogAttributes(JsonLogAttributes const& other)
{
    contextValues_.SetObject();
    contextValues_.CopyFrom(other.contextValues_, allocator_);
}
// Copy assignment: deep-copy the other object's JSON members into this
// instance's allocator pool. Self-assignment is a no-op.
// NOTE(review): CopyFrom replaces the value but the MemoryPoolAllocator
// never releases earlier allocations, so repeated assignment grows the
// pool — confirm attribute objects are short-lived enough for this.
Journal::JsonLogAttributes&
Journal::JsonLogAttributes::operator=(JsonLogAttributes const& other)
{
    if (&other == this)
    {
        return *this;
    }
    contextValues_.CopyFrom(other.contextValues_, allocator_);
    return *this;
}
/** Record the logging module's name as the "Module" attribute.

    Replaces any previously set module name: rapidjson's AddMember does
    not de-duplicate, so without the RemoveMember a second call would
    leave two "Module" members in the JSON object.

    @param name Module name; copied into this object's allocator pool.
*/
void
Journal::JsonLogAttributes::setModuleName(std::string const& name)
{
    contextValues_.RemoveMember("Module");
    contextValues_.AddMember(
        rapidjson::StringRef("Module"),
        rapidjson::Value{name.c_str(), allocator_},
        allocator_);
}
/** Merge two attribute-field objects into a new JsonLogAttributes.

    All members of `a` are copied first; members of `b` are then copied
    on top, overwriting any member of `a` with the same name — i.e. `b`
    wins on key collisions. Both inputs are left untouched; all data is
    deep-copied into the result's own allocator pool.
*/
Journal::JsonLogAttributes
Journal::JsonLogAttributes::combine(
    AttributeFields const& a,
    AttributeFields const& b)
{
    JsonLogAttributes result;
    result.contextValues_.CopyFrom(a, result.allocator_);
    for (auto& member : b.GetObject())
    {
        auto val = rapidjson::Value{member.value, result.allocator_};
        if (result.contextValues_.HasMember(member.name))
        {
            // Overwrite the value copied from `a`.
            result.contextValues_[member.name] = std::move(val);
        }
        else
        {
            result.contextValues_.AddMember(
                rapidjson::Value{member.name, result.allocator_},
                std::move(val),
                result.allocator_);
        }
    }
    return result;
}
// Reset this thread's structured-log message context (parameters,
// allocator pool) and stamp it with the call site's source location.
// Called at the start of each severity accessor when JSON logging is on.
void
Journal::initMessageContext(std::source_location location)
{
    currentJsonLogContext_.reset(location);
}
std::string
Journal::formatLog(
std::string const& message,
severities::Severity severity,
std::optional<JsonLogAttributes> const& attributes)
{
if (!m_jsonLogsEnabled)
{
return message;
}
rapidjson::Document doc{&currentJsonLogContext_.allocator};
rapidjson::Value logContext;
logContext.SetObject();
logContext.AddMember(
rapidjson::StringRef("Function"),
rapidjson::StringRef(currentJsonLogContext_.location.function_name()),
currentJsonLogContext_.allocator);
logContext.AddMember(
rapidjson::StringRef("File"),
rapidjson::StringRef(currentJsonLogContext_.location.file_name()),
currentJsonLogContext_.allocator);
logContext.AddMember(
rapidjson::StringRef("Line"),
currentJsonLogContext_.location.line(),
currentJsonLogContext_.allocator);
std::stringstream threadIdStream;
threadIdStream << std::this_thread::get_id();
auto threadIdStr = threadIdStream.str();
logContext.AddMember(
rapidjson::StringRef("ThreadId"),
rapidjson::StringRef(threadIdStr.c_str()),
currentJsonLogContext_.allocator);
logContext.AddMember(
rapidjson::StringRef("Params"),
std::move(currentJsonLogContext_.messageParams),
currentJsonLogContext_.allocator);
currentJsonLogContext_.messageParams = rapidjson::Value{};
currentJsonLogContext_.messageParams.SetObject();
auto severityStr = to_string(severity);
logContext.AddMember(
rapidjson::StringRef("Level"),
rapidjson::StringRef(severityStr.c_str()),
currentJsonLogContext_.allocator);
logContext.AddMember(
rapidjson::StringRef("Message"),
rapidjson::StringRef(message.c_str()),
currentJsonLogContext_.allocator);
logContext.AddMember(
rapidjson::StringRef("Time"),
std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::system_clock::now().time_since_epoch())
.count(),
currentJsonLogContext_.allocator);
if (attributes.has_value())
{
for (auto const& [key, value] : attributes->contextValues().GetObject())
{
if (logContext.HasMember(key))
continue;
rapidjson::Value jsonValue;
jsonValue.CopyFrom(value, currentJsonLogContext_.allocator);
logContext.AddMember(
rapidjson::Value{key, currentJsonLogContext_.allocator},
std::move(jsonValue),
currentJsonLogContext_.allocator);
}
}
if (globalLogAttributes_)
{
for (auto const& [key, value] :
globalLogAttributes_->contextValues().GetObject())
{
if (logContext.HasMember(key))
continue;
rapidjson::Value jsonValue;
jsonValue.CopyFrom(value, currentJsonLogContext_.allocator);
logContext.AddMember(
rapidjson::Value{key, currentJsonLogContext_.allocator},
std::move(jsonValue),
currentJsonLogContext_.allocator);
}
}
rapidjson::StringBuffer buffer;
rapidjson::Writer writer(buffer);
logContext.Accept(writer);
return {buffer.GetString()};
}
// Turn on JSON-structured log output process-wide.
// NOTE(review): m_jsonLogsEnabled is a plain bool; confirm enable/
// disable only happen at startup/shutdown, before concurrent logging.
void
Journal::enableStructuredJournal()
{
    m_jsonLogsEnabled = true;
}
// Turn off JSON-structured log output and drop any accumulated
// process-wide attributes (they are only meaningful in JSON mode).
void
Journal::disableStructuredJournal()
{
    m_jsonLogsEnabled = false;
    resetGlobalAttributes();
}
// Report whether JSON-structured log output is currently enabled.
bool
Journal::isStructuredJournalEnabled()
{
    return m_jsonLogsEnabled;
}
Journal::Sink::Sink(Severity thresh, bool console)
: thresh_(thresh), m_console(console)
{
@@ -126,17 +345,21 @@ Journal::Sink::threshold(Severity thresh)
//------------------------------------------------------------------------------
Journal::ScopedStream::ScopedStream(Sink& sink, Severity level)
: m_sink(sink), m_level(level)
Journal::ScopedStream::ScopedStream(
std::optional<JsonLogAttributes> attributes,
Sink& sink,
Severity level)
: m_attributes(std::move(attributes)), m_sink(sink), m_level(level)
{
// Modifiers applied from all ctors
m_ostream << std::boolalpha << std::showbase;
}
Journal::ScopedStream::ScopedStream(
std::optional<JsonLogAttributes> attributes,
Stream const& stream,
std::ostream& manip(std::ostream&))
: ScopedStream(stream.sink(), stream.level())
: ScopedStream(std::move(attributes), stream.sink(), stream.level())
{
m_ostream << manip;
}
@@ -147,9 +370,9 @@ Journal::ScopedStream::~ScopedStream()
if (!s.empty())
{
if (s == "\n")
m_sink.write(m_level, "");
m_sink.write(m_level, formatLog("", m_level, m_attributes));
else
m_sink.write(m_level, s);
m_sink.write(m_level, formatLog(s, m_level, m_attributes));
}
}
@@ -164,7 +387,7 @@ Journal::ScopedStream::operator<<(std::ostream& manip(std::ostream&)) const
Journal::ScopedStream
Journal::Stream::operator<<(std::ostream& manip(std::ostream&)) const
{
return ScopedStream(*this, manip);
return {m_attributes, *this, manip};
}
} // namespace beast

View File

@@ -17,12 +17,11 @@
*/
//==============================================================================
#include <xrpld/net/AutoSocket.h>
#include <xrpld/net/HTTPClient.h>
#include <xrpld/net/HTTPClientSSLContext.h>
#include <xrpl/basics/Log.h>
#include <xrpl/beast/core/LexicalCast.h>
#include <xrpl/net/AutoSocket.h>
#include <xrpl/net/HTTPClient.h>
#include <xrpl/net/HTTPClientSSLContext.h>
#include <boost/asio.hpp>
#include <boost/asio/ip/tcp.hpp>
@@ -36,9 +35,13 @@ namespace ripple {
static std::optional<HTTPClientSSLContext> httpClientSSLContext;
void
HTTPClient::initializeSSLContext(Config const& config, beast::Journal j)
HTTPClient::initializeSSLContext(
std::string const& sslVerifyDir,
std::string const& sslVerifyFile,
bool sslVerify,
beast::Journal j)
{
httpClientSSLContext.emplace(config, j);
httpClientSSLContext.emplace(sslVerifyDir, sslVerifyFile, sslVerify, j);
}
//------------------------------------------------------------------------------

View File

@@ -17,7 +17,7 @@
*/
//==============================================================================
#include <xrpld/net/RegisterSSLCerts.h>
#include <xrpl/net/RegisterSSLCerts.h>
#if BOOST_OS_WINDOWS
#include <boost/asio/ssl/error.hpp>

View File

Before

Width:  |  Height:  |  Size: 197 KiB

After

Width:  |  Height:  |  Size: 197 KiB

View File

Before

Width:  |  Height:  |  Size: 117 KiB

After

Width:  |  Height:  |  Size: 117 KiB

View File

@@ -36,7 +36,7 @@ namespace BuildInfo {
// and follow the format described at http://semver.org/
//------------------------------------------------------------------------------
// clang-format off
char const* const versionString = "2.6.2"
char const* const versionString = "2.6.0-rc2"
// clang-format on
#if defined(DEBUG) || defined(SANITIZER)

View File

@@ -568,39 +568,6 @@ struct Credentials_test : public beast::unit_test::suite
jle[jss::result][jss::node]["CredentialType"] ==
strHex(std::string_view(credType)));
}
{
testcase("Credentials fail, directory full");
std::uint32_t const issuerSeq{env.seq(issuer) + 1};
env(ticket::create(issuer, 63));
env.close();
// Everything below can only be tested on open ledger.
auto const res1 = directory::bumpLastPage(
env,
directory::maximumPageIndex(env),
keylet::ownerDir(issuer.id()),
directory::adjustOwnerNode);
BEAST_EXPECT(res1);
auto const jv = credentials::create(issuer, subject, credType);
env(jv, ter(tecDIR_FULL));
// Free one directory entry by using a ticket
env(noop(issuer), ticket::use(issuerSeq + 40));
// Fill subject directory
env(ticket::create(subject, 63));
auto const res2 = directory::bumpLastPage(
env,
directory::maximumPageIndex(env),
keylet::ownerDir(subject.id()),
directory::adjustOwnerNode);
BEAST_EXPECT(res2);
env(jv, ter(tecDIR_FULL));
// End test
env.close();
}
}
{
@@ -1127,7 +1094,6 @@ struct Credentials_test : public beast::unit_test::suite
testSuccessful(all);
testCredentialsDelete(all);
testCreateFailed(all);
testCreateFailed(all - fixDirectoryLimit);
testAcceptFailed(all);
testDeleteFailed(all);
testFeatureFailed(all - featureCredentials);

View File

@@ -1,80 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2020 Dev Null Productions
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <test/jtx.h>
#include <test/jtx/CaptureLogs.h>
#include <test/jtx/Env.h>
#include <xrpld/app/misc/HashRouter.h>
namespace ripple {
namespace test {
class NetworkOPs_test : public beast::unit_test::suite
{
public:
void
run() override
{
testAllBadHeldTransactions();
}
void
testAllBadHeldTransactions()
{
// All trasactions are already marked as SF_BAD, and we should be able
// to handle the case properly without an assertion failure
testcase("No valid transactions in batch");
std::string logs;
{
using namespace jtx;
auto const alice = Account{"alice"};
Env env{
*this,
envconfig(),
std::make_unique<CaptureLogs>(&logs),
beast::severities::kAll};
env.memoize(env.master);
env.memoize(alice);
auto const jtx = env.jt(ticket::create(alice, 1), seq(1), fee(10));
auto transacionId = jtx.stx->getTransactionID();
env.app().getHashRouter().setFlags(
transacionId, HashRouterFlags::HELD);
env(jtx, json(jss::Sequence, 1), ter(terNO_ACCOUNT));
env.app().getHashRouter().setFlags(
transacionId, HashRouterFlags::BAD);
env.close();
}
BEAST_EXPECT(
logs.find("No transaction to process!") != std::string::npos);
}
};
BEAST_DEFINE_TESTSUITE(NetworkOPs, app, ripple);
} // namespace test
} // namespace ripple

View File

@@ -58,10 +58,10 @@ public:
// Insert an item, retrieve it, and age it so it gets purged.
{
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
BEAST_EXPECT(c.size() == 0);
BEAST_EXPECT(!c.insert(1, "one"));
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
BEAST_EXPECT(c.size() == 1);
{
std::string s;
@@ -72,7 +72,7 @@ public:
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
BEAST_EXPECT(c.size() == 0);
}
// Insert an item, maintain a strong pointer, age it, and
@@ -80,7 +80,7 @@ public:
{
BEAST_EXPECT(!c.insert(2, "two"));
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
BEAST_EXPECT(c.size() == 1);
{
auto p = c.fetch(2);
@@ -88,14 +88,14 @@ public:
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.getTrackSize() == 1);
BEAST_EXPECT(c.size() == 1);
}
// Make sure its gone now that our reference is gone
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
BEAST_EXPECT(c.size() == 0);
}
// Insert the same key/value pair and make sure we get the same result
@@ -111,7 +111,7 @@ public:
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
BEAST_EXPECT(c.size() == 0);
}
// Put an object in but keep a strong pointer to it, advance the clock a
@@ -121,24 +121,24 @@ public:
// Put an object in
BEAST_EXPECT(!c.insert(4, "four"));
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
BEAST_EXPECT(c.size() == 1);
{
// Keep a strong pointer to it
auto const p1 = c.fetch(4);
BEAST_EXPECT(p1 != nullptr);
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
BEAST_EXPECT(c.size() == 1);
// Advance the clock a lot
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.getTrackSize() == 1);
BEAST_EXPECT(c.size() == 1);
// Canonicalize a new object with the same key
auto p2 = std::make_shared<std::string>("four");
BEAST_EXPECT(c.canonicalize_replace_client(4, p2));
BEAST_EXPECT(c.getCacheSize() == 1);
BEAST_EXPECT(c.getTrackSize() == 1);
BEAST_EXPECT(c.size() == 1);
// Make sure we get the original object
BEAST_EXPECT(p1.get() == p2.get());
}
@@ -146,7 +146,7 @@ public:
++clock;
c.sweep();
BEAST_EXPECT(c.getCacheSize() == 0);
BEAST_EXPECT(c.getTrackSize() == 0);
BEAST_EXPECT(c.size() == 0);
}
}
};

View File

@@ -175,12 +175,74 @@ public:
BEAST_EXPECT(*lv == -1);
}
// Verify that a coroutine job which has yielded can be posted again
// while the JobQueue is being stopped, without deadlock or crash.
// Sequence: coroutine signals `yielded` and suspends; a helper thread
// waits for `stopped`, then re-posts the coroutine; the test signals
// `stopped` and stops the queue.
// NOTE(review): synchronization partly relies on short sleeps (1ms/5ms)
// rather than pure condition variables — potentially flaky under load;
// confirm before enabling in run().
void
test_yield_and_stop()
{
    using namespace std::chrono_literals;
    using namespace jtx;
    testcase("yield and stop");
    Env env(*this, envconfig([](std::unique_ptr<Config> cfg) {
        cfg->FORCE_MULTI_THREAD = true;
        return cfg;
    }));
    std::shared_ptr<JobQueue::Coro> c;
    std::mutex mutexStop;
    std::mutex mutexYield;
    std::condition_variable cond;
    std::condition_variable condYield;
    bool yielded = false;
    bool stopped = false;
    env.app().getJobQueue().postCoro(
        jtCLIENT, "Coroutine-Test", [&](auto const& cr) {
            c = cr;
            {
                // Tell the test thread the coroutine is about to yield.
                std::unique_lock lock(mutexYield);
                yielded = true;
                condYield.notify_all();
            }
            c->yield();
            // Just to keep this job alive
            std::this_thread::sleep_for(5ms);
        });
    std::thread th{[&]() {
        std::unique_lock lock(mutexStop);
        cond.wait(lock, [&]() { return stopped; });
        // Delay a bit to wait for stop() to be called
        std::this_thread::sleep_for(1ms);
        c->post();
    }};
    // Delay a bit to wait for yield() to be called
    std::this_thread::sleep_for(1ms);
    std::unique_lock lockYield(mutexYield);
    condYield.wait(lockYield, [&]() { return yielded; });
    {
        // Release the helper thread so it can re-post the coroutine.
        std::unique_lock lock(mutexStop);
        stopped = true;
        cond.notify_all();
    }
    env.app().getJobQueue().stop();
    try
    {
        th.join();
    }
    catch (std::exception const& e)
    {
        // NOTE(review): join() failures are deliberately swallowed so a
        // post-stop race cannot fail the test — confirm intentional.
    }
    pass();
}
void
run() override
{
correct_order();
incorrect_order();
thread_specific_storage();
// test_yield_and_stop();
}
};

View File

@@ -178,7 +178,6 @@ struct Peer
using NodeKey = Validation::NodeKey;
//! Logging support that prefixes messages with the peer ID
beast::WrappedSink sink;
beast::Journal j;
//! Generic consensus
@@ -284,8 +283,7 @@ struct Peer
TrustGraph<Peer*>& tg,
CollectorRefs& c,
beast::Journal jIn)
: sink(jIn, "Peer " + to_string(i) + ": ")
, j(sink)
: j(jIn, log::attributes(log::attr("Peer", "Peer " + to_string(i))))
, consensus(s.clock(), *this, j)
, id{i}
, key{id, 0}

View File

@@ -39,7 +39,6 @@
#include <test/jtx/delivermin.h>
#include <test/jtx/deposit.h>
#include <test/jtx/did.h>
#include <test/jtx/directory.h>
#include <test/jtx/domain.h>
#include <test/jtx/escrow.h>
#include <test/jtx/fee.h>

View File

@@ -1,81 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2025 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_TEST_JTX_DIRECTORY_H_INCLUDED
#define RIPPLE_TEST_JTX_DIRECTORY_H_INCLUDED
#include <test/jtx/Env.h>
#include <xrpl/basics/Expected.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/Indexes.h>
#include <cstdint>
#include <limits>
namespace ripple::test::jtx {
/** Directory operations. */
namespace directory {
/// Failure modes reported by the directory test helpers (see bumpLastPage).
enum Error {
    DirectoryRootNotFound,   ///< the directory root page does not exist
    DirectoryTooSmall,       ///< the directory has no page beyond the root
    DirectoryPageDuplicate,  ///< the requested new last page already exists
    DirectoryPageNotFound,   ///< an expected directory page is missing
    InvalidLastPage,         ///< new last page is not past the current last
    AdjustmentError          ///< the caller-supplied adjust callback failed
};
/// Move the position of the last page in the user's directory on open ledger to
/// newLastPage. Requirements:
/// - directory must have at least two pages (root and one more)
/// - adjust should be used to update owner nodes of the objects affected
/// - newLastPage must be greater than index of the last page in the directory
///
/// Use this to test tecDIR_FULL errors in open ledger.
/// NOTE: effects will be DISCARDED on env.close()
auto
bumpLastPage(
Env& env,
std::uint64_t newLastPage,
Keylet directory,
std::function<bool(ApplyView&, uint256, std::uint64_t)> adjust)
-> Expected<void, Error>;
/// Implementation of adjust for the most common ledger entry, i.e. one where
/// page index is stored in sfOwnerNode (and only there). Pass this function
/// to bumpLastPage if the last page of directory has only objects
/// of this kind (e.g. ticket, DID, offer, deposit preauth, MPToken etc.)
bool
adjustOwnerNode(ApplyView& view, uint256 key, std::uint64_t page);
/// Highest page index a directory may use on this ledger: unbounded when
/// fixDirectoryLimit is enabled, otherwise capped at dirNodeMaxPages - 1.
inline auto
maximumPageIndex(Env const& env) -> std::uint64_t
{
    if (env.enabled(fixDirectoryLimit))
        return std::numeric_limits<std::uint64_t>::max();
    return dirNodeMaxPages - 1;
}
} // namespace directory
} // namespace ripple::test::jtx
#endif

View File

@@ -30,12 +30,12 @@
#include <xrpld/app/ledger/LedgerMaster.h>
#include <xrpld/app/misc/NetworkOPs.h>
#include <xrpld/net/HTTPClient.h>
#include <xrpld/net/RPCCall.h>
#include <xrpld/rpc/RPCCall.h>
#include <xrpl/basics/Slice.h>
#include <xrpl/basics/contract.h>
#include <xrpl/json/to_string.h>
#include <xrpl/net/HTTPClient.h>
#include <xrpl/protocol/ErrorCodes.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/Serializer.h>
@@ -74,7 +74,11 @@ Env::AppBundle::AppBundle(
auto timeKeeper_ = std::make_unique<ManualTimeKeeper>();
timeKeeper = timeKeeper_.get();
// Hack so we don't have to call Config::setup
HTTPClient::initializeSSLContext(*config, debugLog());
HTTPClient::initializeSSLContext(
config->SSL_VERIFY_DIR,
config->SSL_VERIFY_FILE,
config->SSL_VERIFY,
debugLog());
owned = make_Application(
std::move(config), std::move(logs), std::move(timeKeeper_));
app = owned.get();

View File

@@ -1,145 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2025 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <test/jtx/directory.h>
#include <xrpld/ledger/Sandbox.h>
namespace ripple::test::jtx {
/** Directory operations. */
namespace directory {
/// Move the last page of `directory` to index `newLastPage` on the open
/// ledger.  All mutations happen inside a Sandbox and are applied to the
/// open ledger only when every step succeeds; on any precondition failure
/// an Error is returned and the ledger is left untouched.
auto
bumpLastPage(
    Env& env,
    std::uint64_t newLastPage,
    Keylet directory,
    std::function<bool(ApplyView&, uint256, std::uint64_t)> adjust)
    -> Expected<void, Error>
{
    Expected<void, Error> res{};
    env.app().openLedger().modify(
        [&](OpenView& view, beast::Journal j) -> bool {
            Sandbox sb(&view, tapNONE);
            // Find the root page
            auto sleRoot = sb.peek(directory);
            if (!sleRoot)
            {
                res = Unexpected<Error>(DirectoryRootNotFound);
                return false;
            }
            // Find last page
            auto const lastIndex = sleRoot->getFieldU64(sfIndexPrevious);
            if (lastIndex == 0)
            {
                // sfIndexPrevious == 0 means the root is the only page.
                res = Unexpected<Error>(DirectoryTooSmall);
                return false;
            }
            if (sb.exists(keylet::page(directory, newLastPage)))
            {
                res = Unexpected<Error>(DirectoryPageDuplicate);
                return false;
            }
            if (lastIndex >= newLastPage)
            {
                // Only moving the last page forward is supported.
                res = Unexpected<Error>(InvalidLastPage);
                return false;
            }
            auto slePage = sb.peek(keylet::page(directory, lastIndex));
            if (!slePage)
            {
                res = Unexpected<Error>(DirectoryPageNotFound);
                return false;
            }
            // Copy its data and delete the page
            auto indexes = slePage->getFieldV256(sfIndexes);
            auto prevIndex = slePage->at(~sfIndexPrevious);
            auto owner = slePage->at(~sfOwner);
            sb.erase(slePage);
            // Create new page to replace slePage
            auto sleNew =
                std::make_shared<SLE>(keylet::page(directory, newLastPage));
            sleNew->setFieldH256(sfRootIndex, directory.key);
            sleNew->setFieldV256(sfIndexes, indexes);
            if (owner)
                sleNew->setAccountID(sfOwner, *owner);
            if (prevIndex)
                sleNew->setFieldU64(sfIndexPrevious, *prevIndex);
            sb.insert(sleNew);
            // Adjust root previous and previous node's next
            sleRoot->setFieldU64(sfIndexPrevious, newLastPage);
            if (prevIndex.value_or(0) == 0)
                sleRoot->setFieldU64(sfIndexNext, newLastPage);
            else
            {
                auto slePrev = sb.peek(keylet::page(directory, *prevIndex));
                if (!slePrev)
                {
                    res = Unexpected<Error>(DirectoryPageNotFound);
                    return false;
                }
                slePrev->setFieldU64(sfIndexNext, newLastPage);
                sb.update(slePrev);
            }
            sb.update(sleRoot);
            // Fixup page numbers in the objects referred by indexes
            if (adjust)
                for (auto const key : indexes)
                {
                    if (!adjust(sb, key, newLastPage))
                    {
                        res = Unexpected<Error>(AdjustmentError);
                        return false;
                    }
                }
            sb.apply(view);
            return true;
        });
    return res;
}
/// Set sfOwnerNode of the ledger object at `key` to `page`.  Returns false
/// when the object does not exist or has no sfOwnerNode field.
bool
adjustOwnerNode(ApplyView& view, uint256 key, std::uint64_t page)
{
    auto sle = view.peek({ltANY, key});
    if (sle && sle->isFieldPresent(sfOwnerNode))
    {
        sle->setFieldU64(sfOwnerNode, page);
        view.update(sle);
        return true;
    }
    return false;
}
} // namespace directory
} // namespace ripple::test::jtx

View File

@@ -19,7 +19,7 @@
#include <test/jtx/utility.h>
#include <xrpld/net/RPCCall.h>
#include <xrpld/rpc/RPCCall.h>
#include <xrpl/basics/contract.h>
#include <xrpl/json/Object.h>

View File

@@ -23,11 +23,9 @@
#include <xrpl/basics/random.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/Protocol.h>
#include <xrpl/protocol/TER.h>
#include <xrpl/protocol/jss.h>
#include <algorithm>
#include <limits>
namespace ripple {
namespace test {
@@ -492,91 +490,6 @@ struct Directory_test : public beast::unit_test::suite
}
}
void
testDirectoryFull()
{
using namespace test::jtx;
Account alice("alice");
auto const testCase = [&, this](FeatureBitset features, auto setup) {
using namespace test::jtx;
Env env(*this, features);
env.fund(XRP(20000), alice);
env.close();
auto const [lastPage, full] = setup(env);
// Populate root page and last page
for (int i = 0; i < 63; ++i)
env(credentials::create(alice, alice, std::to_string(i)));
env.close();
// NOTE, everything below can only be tested on open ledger because
// there is no transaction type to express what bumpLastPage does.
// Bump position of last page from 1 to highest possible
auto const res = directory::bumpLastPage(
env,
lastPage,
keylet::ownerDir(alice.id()),
[lastPage, this](
ApplyView& view, uint256 key, std::uint64_t page) {
auto sle = view.peek({ltCREDENTIAL, key});
if (!BEAST_EXPECT(sle))
return false;
BEAST_EXPECT(page == lastPage);
sle->setFieldU64(sfIssuerNode, page);
// sfSubjectNode is not set in self-issued credentials
view.update(sle);
return true;
});
BEAST_EXPECT(res);
// Create one more credential
env(credentials::create(alice, alice, std::to_string(63)));
// Not enough space for another object if full
auto const expected = full ? ter{tecDIR_FULL} : ter{tesSUCCESS};
env(credentials::create(alice, alice, "foo"), expected);
// Destroy all objects in directory
for (int i = 0; i < 64; ++i)
env(credentials::deleteCred(
alice, alice, alice, std::to_string(i)));
if (!full)
env(credentials::deleteCred(alice, alice, alice, "foo"));
// Verify directory is empty.
auto const sle = env.le(keylet::ownerDir(alice.id()));
BEAST_EXPECT(sle == nullptr);
// Test completed
env.close();
};
testCase(
testable_amendments() - fixDirectoryLimit,
[this](Env&) -> std::tuple<std::uint64_t, bool> {
testcase("directory full without fixDirectoryLimit");
return {dirNodeMaxPages - 1, true};
});
testCase(
testable_amendments(), //
[this](Env&) -> std::tuple<std::uint64_t, bool> {
testcase("directory not full with fixDirectoryLimit");
return {dirNodeMaxPages - 1, false};
});
testCase(
testable_amendments(), //
[this](Env&) -> std::tuple<std::uint64_t, bool> {
testcase("directory full with fixDirectoryLimit");
return {std::numeric_limits<std::uint64_t>::max(), true};
});
}
void
run() override
{
@@ -585,7 +498,6 @@ struct Directory_test : public beast::unit_test::suite
testRipd1353();
testEmptyChain();
testPreviousTxnID();
testDirectoryFull();
}
};

View File

@@ -18,7 +18,7 @@
#include <test/jtx.h>
#include <test/jtx/utility.h>
#include <xrpld/net/RPCCall.h>
#include <xrpld/rpc/RPCCall.h>
#include <xrpld/rpc/detail/RPCHelpers.h>
#include <xrpl/beast/unit_test.h>

View File

@@ -681,7 +681,7 @@ class ServerStatus_test : public beast::unit_test::suite,
resp["Upgrade"] == "websocket");
BEAST_EXPECT(
resp.find("Connection") != resp.end() &&
resp["Connection"] == "upgrade");
resp["Connection"] == "Upgrade");
}
void

View File

@@ -2,10 +2,11 @@ include(xrpl_add_test)
# Test requirements.
find_package(doctest REQUIRED)
find_package(RapidJSON REQUIRED)
# Common library dependencies for the rest of the tests.
add_library(xrpl.imports.test INTERFACE)
target_link_libraries(xrpl.imports.test INTERFACE doctest::doctest xrpl.libxrpl)
target_link_libraries(xrpl.imports.test INTERFACE doctest::doctest rapidjson xrpl.libxrpl)
# One test for each module.
xrpl_add_test(basics)

View File

@@ -0,0 +1,588 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <xrpl/basics/Log.h>
#include <doctest/doctest.h>
#include <rapidjson/document.h>
using namespace ripple;
/// Logs implementation whose sinks capture every message into a shared
/// std::stringstream, letting tests inspect exactly what was written.
class MockLogs : public Logs
{
private:
    /// Forwards both filtered and unconditional writes to the capture
    /// stream owned by the parent MockLogs.
    class Sink : public beast::Journal::Sink
    {
    private:
        MockLogs& owner_;
        std::string name_;

    public:
        Sink(
            std::string const& partition,
            beast::severities::Severity thresh,
            MockLogs& logs)
            : beast::Journal::Sink(thresh, false)
            , owner_(logs)
            , name_(partition)
        {
        }

        Sink(Sink const&) = delete;
        Sink&
        operator=(Sink const&) = delete;

        void
        write(beast::severities::Severity level, std::string const& text)
            override
        {
            owner_.logStream_ << text;
        }

        void
        writeAlways(beast::severities::Severity level, std::string const& text)
            override
        {
            owner_.logStream_ << text;
        }
    };

    std::stringstream& logStream_;

public:
    MockLogs(std::stringstream& logStream, beast::severities::Severity level)
        : Logs(level), logStream_(logStream)
    {
    }

    /// Every partition gets a Sink writing into the shared capture stream.
    std::unique_ptr<beast::Journal::Sink>
    makeSink(
        std::string const& partition,
        beast::severities::Severity startingLevel) override
    {
        return std::make_unique<Sink>(partition, startingLevel, *this);
    }
};
TEST_CASE("Text logs")
{
    std::stringstream captured;
    MockLogs logs{captured, beast::severities::kAll};

    // A plain message reaches the sink.
    logs.journal("Test").debug() << "Test";
    CHECK(captured.str().find("Test") != std::string::npos);

    // A trailing newline does not reach the sink.
    captured.str("");
    logs.journal("Test").debug() << "\n";
    CHECK(captured.str().find("\n") == std::string::npos);
}
TEST_CASE("Test format output")
{
    std::string formatted;
    Logs::format(formatted, "Message", beast::severities::kDebug, "Test");

    // The raw text must appear, decorated with additional context.
    CHECK(formatted.find("Message") != std::string::npos);
    CHECK(formatted != "Message");
}
// With structured (JSON) logging enabled, Logs::format must pass the
// message through verbatim instead of decorating it.
TEST_CASE("Test format output when structured logs are enabled")
{
    beast::Journal::enableStructuredJournal();
    std::string output;
    Logs::format(output, "Message", beast::severities::kDebug, "Test");
    CHECK(output == "Message");
    // Restore global state so later tests see plain-text formatting.
    beast::Journal::disableStructuredJournal();
}
// Before enabling structured logging the raw text is emitted; afterwards
// each write becomes a JSON object carrying the message in "Message".
TEST_CASE("Enable json logs")
{
    std::stringstream logStream;
    MockLogs logs{logStream, beast::severities::kAll};
    logs.journal("Test").debug() << "Test";
    CHECK(logStream.str() == "Test");
    logStream.str("");
    beast::Journal::enableStructuredJournal();
    logs.journal("Test").debug() << "\n";
    rapidjson::Document doc;
    doc.Parse(logStream.str().c_str());
    CHECK(doc.GetParseError() == rapidjson::ParseErrorCode::kParseErrorNone);
    CHECK(doc.IsObject());
    CHECK(doc.HasMember("Message"));
    CHECK(doc["Message"].IsString());
    // The lone "\n" is stripped, leaving an empty message string.
    CHECK(doc["Message"].GetString() == std::string{""});
    beast::Journal::disableStructuredJournal();
}
// Attributes registered via addGlobalAttributes must appear as top-level
// fields on every structured log record.
TEST_CASE("Global attributes")
{
    std::stringstream logStream;
    MockLogs logs{logStream, beast::severities::kAll};
    beast::Journal::enableStructuredJournal();
    beast::Journal::addGlobalAttributes(
        log::attributes(log::attr("Field1", "Value1")));
    logs.journal("Test").debug() << "Test";
    rapidjson::Document jsonLog;
    jsonLog.Parse(logStream.str().c_str());
    CHECK(
        jsonLog.GetParseError() == rapidjson::ParseErrorCode::kParseErrorNone);
    CHECK(jsonLog.IsObject());
    CHECK(jsonLog.HasMember("Field1"));
    CHECK(jsonLog["Field1"].IsString());
    CHECK(jsonLog["Field1"].GetString() == std::string{"Value1"});
    // NOTE(review): the global attribute is never explicitly removed --
    // confirm disableStructuredJournal() also clears global attributes,
    // otherwise it leaks into subsequent tests.
    beast::Journal::disableStructuredJournal();
}
// Per-journal attributes are merged on top of the globals: a journal-level
// attribute with the same key overrides the global value.
TEST_CASE("Global attributes inheritable")
{
    std::stringstream logStream;
    MockLogs logs{logStream, beast::severities::kAll};
    beast::Journal::enableStructuredJournal();
    beast::Journal::addGlobalAttributes(
        log::attributes(log::attr("Field1", "Value1")));
    logs.journal(
            "Test",
            log::attributes(
                log::attr("Field1", "Value3"), log::attr("Field2", "Value2")))
        .debug()
        << "Test";
    rapidjson::Document jsonLog;
    jsonLog.Parse(logStream.str().c_str());
    CHECK(
        jsonLog.GetParseError() == rapidjson::ParseErrorCode::kParseErrorNone);
    CHECK(jsonLog.IsObject());
    CHECK(jsonLog.HasMember("Field1"));
    CHECK(jsonLog["Field1"].IsString());
    // Field1 should be overwritten to Value3
    CHECK(jsonLog["Field1"].GetString() == std::string{"Value3"});
    CHECK(jsonLog["Field2"].IsString());
    CHECK(jsonLog["Field2"].GetString() == std::string{"Value2"});
    beast::Journal::disableStructuredJournal();
}
/**
* @brief sink for writing all log messages to a stringstream
*/
class MockSink : public beast::Journal::Sink
{
std::stringstream& strm_;
public:
MockSink(beast::severities::Severity threshold, std::stringstream& strm)
: beast::Journal::Sink(threshold, false), strm_(strm)
{
}
void
write(beast::severities::Severity level, std::string const& text) override
{
strm_ << text;
}
void
writeAlways(beast::severities::Severity level, std::string const& text)
override
{
strm_ << text;
}
};
/// Fixture that routes a Journal through a MockSink into a stringstream and
/// enables structured (JSON) logging for the lifetime of each test.
class JsonLogStreamFixture
{
public:
    JsonLogStreamFixture()
        : sink_(beast::severities::kAll, logStream_), j_(sink_)
    {
        beast::Journal::enableStructuredJournal();
    }

    ~JsonLogStreamFixture()
    {
        beast::Journal::disableStructuredJournal();
    }

    /// The stream capturing everything the journal writes.
    std::stringstream&
    stream()
    {
        return logStream_;
    }

    /// The journal under test.
    beast::Journal&
    journal()
    {
        return j_;
    }

private:
    // Declared before sink_ on purpose: members initialize in declaration
    // order, and sink_'s constructor receives a reference to logStream_.
    // (The previous declaration order bound that reference to a
    // not-yet-constructed stream.)
    std::stringstream logStream_;
    MockSink sink_;
    beast::Journal j_;
};
// A structured record must carry source metadata (Function/File/Line/
// ThreadId/Time), the severity, a Params object, and the assembled message
// with stream manipulators (boolalpha/noboolalpha) applied.
TEST_CASE_FIXTURE(JsonLogStreamFixture, "TestJsonLogFields")
{
    journal().debug() << std::boolalpha << true << std::noboolalpha << " Test "
                      << std::boolalpha << false;
    rapidjson::Document logValue;
    logValue.Parse(stream().str().c_str());
    CHECK(
        logValue.GetParseError() == rapidjson::ParseErrorCode::kParseErrorNone);
    CHECK(logValue.IsObject());
    CHECK(logValue.HasMember("Function"));
    CHECK(logValue.HasMember("File"));
    CHECK(logValue.HasMember("Line"));
    CHECK(logValue.HasMember("ThreadId"));
    CHECK(logValue.HasMember("Params"));
    CHECK(logValue.HasMember("Level"));
    CHECK(logValue.HasMember("Message"));
    CHECK(logValue.HasMember("Time"));
    CHECK(logValue["Function"].IsString());
    CHECK(logValue["File"].IsString());
    CHECK(logValue["Line"].IsNumber());
    CHECK(logValue["Params"].IsObject());
    CHECK(logValue["Message"].IsString());
    // "1"/"0" would indicate the manipulators were ignored.
    CHECK(logValue["Message"].GetString() == std::string{"true Test false"});
}
// Each severity accessor (trace/debug/info/warn/error/fatal) must stamp the
// matching severity name into the record's "Level" field.  The six formerly
// copy-pasted sub-blocks are folded into one checked helper.
TEST_CASE_FIXTURE(JsonLogStreamFixture, "TestJsonLogLevels")
{
    // Emit one message via the stream produced by `makeStream` and verify
    // the JSON record's "Level" names the expected severity.
    auto const checkLevel = [&](auto&& makeStream,
                                beast::severities::Severity expected) {
        stream().str("");
        makeStream() << "Test";
        rapidjson::Document logValue;
        logValue.Parse(stream().str().c_str());
        CHECK(
            logValue.GetParseError() ==
            rapidjson::ParseErrorCode::kParseErrorNone);
        CHECK(
            logValue["Level"].GetString() ==
            beast::severities::to_string(expected));
    };

    checkLevel([&] { return journal().trace(); }, beast::severities::kTrace);
    checkLevel([&] { return journal().debug(); }, beast::severities::kDebug);
    checkLevel([&] { return journal().info(); }, beast::severities::kInfo);
    checkLevel([&] { return journal().warn(); }, beast::severities::kWarning);
    checkLevel([&] { return journal().error(); }, beast::severities::kError);
    checkLevel([&] { return journal().fatal(); }, beast::severities::kFatal);
}
// stream(severity) must tag the record with that severity, just like the
// named accessors do.
TEST_CASE_FIXTURE(JsonLogStreamFixture, "TestJsonLogStream")
{
    journal().stream(beast::severities::kError) << "Test";

    rapidjson::Document record;
    record.Parse(stream().str().c_str());
    CHECK(
        record.GetParseError() == rapidjson::ParseErrorCode::kParseErrorNone);
    CHECK(
        record["Level"].GetString() ==
        beast::severities::to_string(beast::severities::kError));
}
// log::param both contributes to the rendered message text and records the
// value under "Params" in the structured record.
TEST_CASE_FIXTURE(JsonLogStreamFixture, "TestJsonLogParams")
{
    journal().debug() << "Test: " << log::param("Field1", 1) << ", "
                      << log::param(
                             "Field2",
                             std::numeric_limits<std::uint64_t>::max());
    rapidjson::Document logValue;
    logValue.Parse(stream().str().c_str());
    CHECK(
        logValue.GetParseError() == rapidjson::ParseErrorCode::kParseErrorNone);
    CHECK(logValue["Params"].IsObject());
    CHECK(logValue["Params"]["Field1"].IsNumber());
    CHECK(logValue["Params"]["Field1"].GetInt() == 1);
    // Field2 is uint64 max; the assertions below verify it is emitted as a
    // JSON number carrying the full uint64 range.  (An earlier comment
    // claimed it had to be stringified -- that contradicts these checks.)
    CHECK(logValue["Params"]["Field2"].IsNumber());
    CHECK(
        logValue["Params"]["Field2"].GetUint64() ==
        std::numeric_limits<std::uint64_t>::max());
    CHECK(logValue["Message"].IsString());
    CHECK(
        logValue["Message"].GetString() ==
        std::string{"Test: 1, 18446744073709551615"});
}
// log::field records values under "Params" WITHOUT contributing to the
// rendered message text (contrast with log::param above).
// Renamed from "TestJsonLogFields", which collided with the earlier test
// case of the same name in this file.
TEST_CASE_FIXTURE(JsonLogStreamFixture, "TestJsonLogFieldAppender")
{
    journal().debug() << "Test" << log::field("Field1", 1)
                      << log::field(
                             "Field2",
                             std::numeric_limits<std::uint64_t>::max());
    rapidjson::Document logValue;
    logValue.Parse(stream().str().c_str());
    CHECK(
        logValue.GetParseError() == rapidjson::ParseErrorCode::kParseErrorNone);
    CHECK(logValue["Params"].IsObject());
    CHECK(logValue["Params"]["Field1"].IsNumber());
    CHECK(logValue["Params"]["Field1"].GetInt() == 1);
    // Field2 is uint64 max; it must survive as a JSON number carrying the
    // full uint64 range.
    CHECK(logValue["Params"]["Field2"].IsNumber());
    CHECK(
        logValue["Params"]["Field2"].GetUint64() ==
        std::numeric_limits<std::uint64_t>::max());
    CHECK(logValue["Message"].IsString());
    // The fields do not leak into the message text.
    CHECK(logValue["Message"].GetString() == std::string{"Test"});
}
// A journal constructed with explicit attributes must emit them as
// top-level JSON fields on every record.
TEST_CASE_FIXTURE(JsonLogStreamFixture, "TestJournalAttributes")
{
    beast::Journal tagged{
        journal(),
        log::attributes(log::attr("Field1", "Value1"), log::attr("Field2", 2))};
    tagged.debug() << "Test";

    rapidjson::Document record;
    record.Parse(stream().str().c_str());
    CHECK(
        record.GetParseError() == rapidjson::ParseErrorCode::kParseErrorNone);
    CHECK(record["Field1"].IsString());
    CHECK(record["Field1"].GetString() == std::string{"Value1"});
    CHECK(record["Field2"].IsNumber());
    CHECK(record["Field2"].GetInt() == 2);
}
// A journal derived from another journal inherits its attributes; a
// duplicate key supplied by the derived journal overrides the parent's.
TEST_CASE_FIXTURE(JsonLogStreamFixture, "TestJournalAttributesInheritable")
{
    beast::Journal j{
        journal(),
        log::attributes(log::attr("Field1", "Value1"), log::attr("Field2", 2))};
    beast::Journal j2{
        j,
        log::attributes(log::attr("Field3", "Value3"), log::attr("Field2", 0))};
    j2.debug() << "Test";
    rapidjson::Document logValue;
    logValue.Parse(stream().str().c_str());
    CHECK(
        logValue.GetParseError() == rapidjson::ParseErrorCode::kParseErrorNone);
    CHECK(logValue["Field1"].IsString());
    CHECK(logValue["Field1"].GetString() == std::string{"Value1"});
    CHECK(logValue["Field3"].IsString());
    CHECK(logValue["Field3"].GetString() == std::string{"Value3"});
    // Field2 should be overwritten to 0
    CHECK(logValue["Field2"].IsNumber());
    CHECK(logValue["Field2"].GetInt() == 0);
}
// Attributes must survive when a Journal is MOVE-constructed from another.
// The previous body was byte-identical to the copy-based inheritance test
// and never moved anything, leaving the named scenario untested.
TEST_CASE_FIXTURE(
    JsonLogStreamFixture,
    "TestJournalAttributesInheritableAfterMoving")
{
    beast::Journal j{
        journal(),
        log::attributes(log::attr("Field1", "Value1"), log::attr("Field2", 2))};
    // Move from j; j is not used again after this point.
    beast::Journal j2{
        std::move(j),
        log::attributes(log::attr("Field3", "Value3"), log::attr("Field2", 0))};
    j2.debug() << "Test";
    rapidjson::Document logValue;
    logValue.Parse(stream().str().c_str());
    CHECK(
        logValue.GetParseError() == rapidjson::ParseErrorCode::kParseErrorNone);
    CHECK(logValue["Field1"].IsString());
    CHECK(logValue["Field1"].GetString() == std::string{"Value1"});
    CHECK(logValue["Field3"].IsString());
    CHECK(logValue["Field3"].GetString() == std::string{"Value3"});
    // Field2 should be overwritten to 0
    CHECK(logValue["Field2"].IsNumber());
    CHECK(logValue["Field2"].GetInt() == 0);
}
// Attributes must survive copy-assignment into a journal that previously
// had a null sink.
TEST_CASE_FIXTURE(
    JsonLogStreamFixture,
    "TestJournalAttributesInheritableAfterCopyAssignment")
{
    // Copy-construct from the fixture journal as the sibling tests do.
    // (Previously this used std::move(journal()), moving from the shared
    // fixture journal in a test that is about COPY assignment.)
    beast::Journal j{
        journal(),
        log::attributes(log::attr("Field1", "Value1"), log::attr("Field2", 2))};
    beast::Journal j2{beast::Journal::getNullSink()};
    j2 = j;
    j2.debug() << "Test";
    rapidjson::Document logValue;
    logValue.Parse(stream().str().c_str());
    CHECK(
        logValue.GetParseError() == rapidjson::ParseErrorCode::kParseErrorNone);
    CHECK(logValue["Field1"].IsString());
    CHECK(logValue["Field1"].GetString() == std::string{"Value1"});
    CHECK(logValue["Field2"].IsNumber());
    CHECK(logValue["Field2"].GetInt() == 2);
}
// Attributes must survive move-assignment into a journal that previously
// had a null sink.
TEST_CASE_FIXTURE(
    JsonLogStreamFixture,
    "TestJournalAttributesInheritableAfterMoveAssignment")
{
    beast::Journal j{
        journal(),
        log::attributes(log::attr("Field1", "Value1"), log::attr("Field2", 2))};
    beast::Journal j2{beast::Journal::getNullSink()};
    // j is moved-from past this point and deliberately not used again.
    j2 = std::move(j);
    j2.debug() << "Test";
    rapidjson::Document logValue;
    logValue.Parse(stream().str().c_str());
    CHECK(
        logValue.GetParseError() == rapidjson::ParseErrorCode::kParseErrorNone);
    CHECK(logValue["Field1"].IsString());
    CHECK(logValue["Field1"].GetString() == std::string{"Value1"});
    CHECK(logValue["Field2"].IsNumber());
    CHECK(logValue["Field2"].GetInt() == 2);
}

View File

@@ -1107,8 +1107,13 @@ RCLConsensus::startRound(
RclConsensusLogger::RclConsensusLogger(
char const* label,
bool const validating,
beast::Journal j)
: j_(j)
beast::Journal j,
std::source_location location)
: j_(j,
log::attributes(
log::attr("Role", "ConsensusLogger"),
log::attr("Label", label)))
, location_(location)
{
if (!validating && !j.info())
return;
@@ -1125,11 +1130,11 @@ RclConsensusLogger::~RclConsensusLogger()
return;
auto const duration = std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::steady_clock::now() - start_);
std::stringstream outSs;
outSs << header_ << "duration " << (duration.count() / 1000) << '.'
<< std::setw(3) << std::setfill('0') << (duration.count() % 1000)
<< "s. " << ss_->str();
j_.sink().writeAlways(beast::severities::kInfo, outSs.str());
j_.info(location_) << header_ << "duration " << (duration.count() / 1000)
<< '.' << std::setw(3) << std::setfill('0')
<< (duration.count() % 1000) << "s. " << ss_->str()
<< log::field("Duration", duration.count());
}
} // namespace ripple

View File

@@ -553,12 +553,14 @@ class RclConsensusLogger
beast::Journal j_;
std::unique_ptr<std::stringstream> ss_;
std::chrono::steady_clock::time_point start_;
std::source_location location_;
public:
explicit RclConsensusLogger(
char const* label,
bool validating,
beast::Journal j);
beast::Journal j,
std::source_location location = std::source_location::current());
~RclConsensusLogger();
std::unique_ptr<std::stringstream> const&

View File

@@ -20,7 +20,7 @@
#ifndef RIPPLE_APP_LEDGER_BOOKLISTENERS_H_INCLUDED
#define RIPPLE_APP_LEDGER_BOOKLISTENERS_H_INCLUDED
#include <xrpld/net/InfoSub.h>
#include <xrpld/rpc/InfoSub.h>
#include <xrpl/protocol/MultiApiJson.h>

View File

@@ -63,8 +63,6 @@ LedgerHistory::insert(
ledger->stateMap().getHash().isNonZero(),
"ripple::LedgerHistory::insert : nonzero hash");
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
bool const alreadyHad = m_ledgers_by_hash.canonicalize_replace_cache(
ledger->info().hash, ledger);
if (validated)
@@ -76,7 +74,6 @@ LedgerHistory::insert(
LedgerHash
LedgerHistory::getLedgerHash(LedgerIndex index)
{
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
if (auto it = mLedgersByIndex.find(index); it != mLedgersByIndex.end())
return it->second;
return {};
@@ -86,13 +83,11 @@ std::shared_ptr<Ledger const>
LedgerHistory::getLedgerBySeq(LedgerIndex index)
{
{
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
auto it = mLedgersByIndex.find(index);
if (it != mLedgersByIndex.end())
{
uint256 hash = it->second;
sl.unlock();
return getLedgerByHash(hash);
}
}
@@ -108,7 +103,6 @@ LedgerHistory::getLedgerBySeq(LedgerIndex index)
{
// Add this ledger to the local tracking by index
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
XRPL_ASSERT(
ret->isImmutable(),
@@ -458,8 +452,6 @@ LedgerHistory::builtLedger(
XRPL_ASSERT(
!hash.isZero(), "ripple::LedgerHistory::builtLedger : nonzero hash");
std::unique_lock sl(m_consensus_validated.peekMutex());
auto entry = std::make_shared<cv_entry>();
m_consensus_validated.canonicalize_replace_client(index, entry);
@@ -500,8 +492,6 @@ LedgerHistory::validatedLedger(
!hash.isZero(),
"ripple::LedgerHistory::validatedLedger : nonzero hash");
std::unique_lock sl(m_consensus_validated.peekMutex());
auto entry = std::make_shared<cv_entry>();
m_consensus_validated.canonicalize_replace_client(index, entry);
@@ -535,10 +525,9 @@ LedgerHistory::validatedLedger(
bool
LedgerHistory::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
{
std::unique_lock sl(m_ledgers_by_hash.peekMutex());
auto ledger = m_ledgers_by_hash.fetch(ledgerHash);
auto it = mLedgersByIndex.find(ledgerIndex);
if ((it != mLedgersByIndex.end()) && (it->second != ledgerHash))
if (ledger && (it != mLedgersByIndex.end()) && (it->second != ledgerHash))
{
it->second = ledgerHash;
return false;

View File

@@ -833,7 +833,10 @@ public:
serverOkay(std::string& reason) override;
beast::Journal
journal(std::string const& name) override;
journal(
std::string const& name,
std::optional<beast::Journal::JsonLogAttributes> attributes =
std::nullopt) override;
//--------------------------------------------------------------------------
@@ -1212,8 +1215,15 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline)
}
JLOG(m_journal.info()) << "Process starting: "
<< BuildInfo::getFullVersionString()
<< ", Instance Cookie: " << instanceCookie_;
<< log::param(
"RippledVersion",
BuildInfo::getFullVersionString())
<< ", Instance Cookie: "
<< log::param("InstanceCookie", instanceCookie_);
beast::Journal::addGlobalAttributes(log::attributes(
log::attr("RippledVersion", BuildInfo::getFullVersionString()),
log::attr("InstanceCookie", to_string(instanceCookie_))));
if (numberOfThreads(*config_) < 2)
{
@@ -2161,9 +2171,11 @@ ApplicationImp::serverOkay(std::string& reason)
}
beast::Journal
ApplicationImp::journal(std::string const& name)
ApplicationImp::journal(
std::string const& name,
std::optional<beast::Journal::JsonLogAttributes> attributes)
{
return logs_->journal(name);
return logs_->journal(name, std::move(attributes));
}
void

View File

@@ -258,7 +258,10 @@ public:
serverOkay(std::string& reason) = 0;
virtual beast::Journal
journal(std::string const& name) = 0;
journal(
std::string const& name,
std::optional<beast::Journal::JsonLogAttributes> attributes =
std::nullopt) = 0;
/* Returns the number of file descriptors the application needs */
virtual int

View File

@@ -22,9 +22,9 @@
#include <xrpld/app/main/Application.h>
#include <xrpld/core/JobQueue.h>
#include <xrpld/net/InfoSub.h>
#include <xrpld/rpc/Context.h>
#include <xrpld/rpc/GRPCHandlers.h>
#include <xrpld/rpc/InfoSub.h>
#include <xrpld/rpc/Role.h>
#include <xrpld/rpc/detail/Handler.h>
#include <xrpld/rpc/detail/RPCHelpers.h>

View File

@@ -22,7 +22,7 @@
#include <xrpld/core/Config.h>
#include <xrpld/core/ConfigSections.h>
#include <xrpld/core/TimeKeeper.h>
#include <xrpld/net/RPCCall.h>
#include <xrpld/rpc/RPCCall.h>
#include <xrpl/basics/Log.h>
#include <xrpl/beast/core/CurrentThreadName.h>
@@ -788,6 +788,14 @@ run(int argc, char** argv)
else if (vm.count("verbose"))
thresh = kTrace;
if (config->LOG_STYLE == LogStyle::Json)
{
beast::Journal::enableStructuredJournal();
beast::Journal::addGlobalAttributes(log::attributes(
log::attr("Application", "rippled"),
log::attr("NetworkID", config->NETWORK_ID)));
}
auto logs = std::make_unique<Logs>(thresh);
// No arguments. Run server.

View File

@@ -594,7 +594,7 @@ public:
{
JLOG(m_journal.error())
<< "NetworkOPs: heartbeatTimer cancel error: "
<< ec.message();
<< log::param("Reason", ec.message());
}
ec.clear();
@@ -603,7 +603,7 @@ public:
{
JLOG(m_journal.error())
<< "NetworkOPs: clusterTimer cancel error: "
<< ec.message();
<< log::param("Reason", ec.message());
}
ec.clear();
@@ -612,7 +612,7 @@ public:
{
JLOG(m_journal.error())
<< "NetworkOPs: accountHistoryTxTimer cancel error: "
<< ec.message();
<< log::param("Reason", ec.message());
}
}
// Make sure that any waitHandlers pending in our timers are done.
@@ -977,9 +977,9 @@ NetworkOPsImp::setTimer(
e.value() != boost::asio::error::operation_aborted)
{
// Try again later and hope for the best.
JLOG(m_journal.error())
<< "Timer got error '" << e.message()
<< "'. Restarting timer.";
JLOG(m_journal.error()) << "Timer got error '"
<< log::param("Error", e.message())
<< "'. Restarting timer.";
onError();
}
}))
@@ -1022,8 +1022,9 @@ NetworkOPsImp::setClusterTimer()
void
NetworkOPsImp::setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
{
JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "
<< toBase58(subInfo.index_->accountId_);
JLOG(m_journal.debug())
<< "Scheduling AccountHistory job for account "
<< log::param("AccountID", toBase58(subInfo.index_->accountId_));
using namespace std::chrono_literals;
setTimer(
accountHistoryTxTimer_,
@@ -1055,7 +1056,9 @@ NetworkOPsImp::processHeartbeatTimer()
std::stringstream ss;
ss << "Node count (" << numPeers << ") has fallen "
<< "below required minimum (" << minPeerCount_ << ").";
JLOG(m_journal.warn()) << ss.str();
JLOG(m_journal.warn())
<< ss.str() << log::field("NodeCount", numPeers)
<< log::field("RequiredMinimum", minPeerCount_);
CLOG(clog.ss()) << "set mode to DISCONNECTED: " << ss.str();
}
else
@@ -1078,7 +1081,8 @@ NetworkOPsImp::processHeartbeatTimer()
{
setMode(OperatingMode::CONNECTED);
JLOG(m_journal.info())
<< "Node count (" << numPeers << ") is sufficient.";
<< "Node count (" << log::param("NodeCount", numPeers)
<< ") is sufficient.";
CLOG(clog.ss()) << "setting mode to CONNECTED based on " << numPeers
<< " peers. ";
}
@@ -1186,6 +1190,10 @@ NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin)
void
NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
{
beast::Journal journal{
m_journal,
log::attributes(
log::attr("TransactionID", to_string(iTrans->getTransactionID())))};
if (isNeedNetworkLedger())
{
// Nothing we can do if we've never been in sync
@@ -1196,7 +1204,7 @@ NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
if (iTrans->isFlag(tfInnerBatchTxn) &&
m_ledgerMaster.getValidatedRules().enabled(featureBatch))
{
JLOG(m_journal.error())
JLOG(journal.error())
<< "Submitted transaction invalid: tfInnerBatchTxn flag present.";
return;
}
@@ -1209,7 +1217,7 @@ NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
if ((flags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
{
JLOG(m_journal.warn()) << "Submitted transaction cached bad";
JLOG(journal.warn()) << "Submitted transaction cached bad";
return;
}
@@ -1223,15 +1231,16 @@ NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
if (validity != Validity::Valid)
{
JLOG(m_journal.warn())
<< "Submitted transaction invalid: " << reason;
JLOG(journal.warn()) << "Submitted transaction invalid: "
<< log::param("Reason", reason);
return;
}
}
catch (std::exception const& ex)
{
JLOG(m_journal.warn())
<< "Exception checking transaction " << txid << ": " << ex.what();
JLOG(journal.warn()) << "Exception checking transaction "
<< log::param("TransactionID", txid) << ": "
<< log::param("Reason", ex.what());
return;
}
@@ -1249,12 +1258,18 @@ NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
bool
NetworkOPsImp::preProcessTransaction(std::shared_ptr<Transaction>& transaction)
{
beast::Journal journal{
m_journal,
log::attributes(
log::attr("TransactionID", to_string(transaction->getID())))};
auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
if ((newFlags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
{
// cached bad
JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
JLOG(journal.warn())
<< log::param("TransactionID", transaction->getID())
<< ": cached bad!\n";
transaction->setStatus(INVALID);
transaction->setResult(temBAD_SIGNATURE);
return false;
@@ -1287,7 +1302,8 @@ NetworkOPsImp::preProcessTransaction(std::shared_ptr<Transaction>& transaction)
// Not concerned with local checks at this point.
if (validity == Validity::SigBad)
{
JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
JLOG(journal.info()) << "Transaction has bad signature: "
<< log::param("Reason", reason);
transaction->setStatus(INVALID);
transaction->setResult(temBAD_SIGNATURE);
app_.getHashRouter().setFlags(
@@ -1412,7 +1428,10 @@ NetworkOPsImp::processTransactionSet(CanonicalTXSet const& set)
if (!reason.empty())
{
JLOG(m_journal.trace())
<< "Exception checking transaction: " << reason;
<< "Exception checking transaction: "
<< log::param(
"TransactionID", to_string(transaction->getID()))
<< ", reason: " << log::param("Reason", reason);
}
app_.getHashRouter().setFlags(
tx->getTransactionID(), HashRouterFlags::BAD);
@@ -1448,11 +1467,6 @@ NetworkOPsImp::processTransactionSet(CanonicalTXSet const& set)
for (auto& t : transactions)
mTransactions.push_back(std::move(t));
}
if (mTransactions.empty())
{
JLOG(m_journal.debug()) << "No transaction to process!";
return;
}
doTransactionSyncBatch(lock, [&](std::unique_lock<std::mutex> const&) {
XRPL_ASSERT(
@@ -1557,7 +1571,9 @@ NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
if (transResultInfo(e.result, token, human))
{
JLOG(m_journal.info())
<< "TransactionResult: " << token << ": " << human;
<< "TransactionResult: " << log::param("Token", token)
<< ": " << log::param("Human", human)
<< log::field("TransactionID", e.transaction->getID());
}
}
#endif
@@ -1567,7 +1583,8 @@ NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
if (e.result == tesSUCCESS)
{
JLOG(m_journal.debug())
<< "Transaction is now included in open ledger";
<< "Transaction is now included in open ledger"
<< log::field("TransactionID", e.transaction->getID());
e.transaction->setStatus(INCLUDED);
// Pop as many "reasonable" transactions for this account as
@@ -1603,7 +1620,8 @@ NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
{
JLOG(m_journal.debug())
<< "Transaction is likely to claim a"
<< " fee, but is queued until fee drops";
<< " fee, but is queued until fee drops"
<< log::field("TransactionID", e.transaction->getID());
e.transaction->setStatus(HELD);
// Add to held transactions, because it could get
@@ -1649,7 +1667,8 @@ NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
{
// transaction should be held
JLOG(m_journal.debug())
<< "Transaction should be held: " << e.result;
<< "Transaction should be held: "
<< log::param("Result", e.result);
e.transaction->setStatus(HELD);
m_ledgerMaster.addHeldTransaction(e.transaction);
e.transaction->setKept();
@@ -1657,17 +1676,23 @@ NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
else
JLOG(m_journal.debug())
<< "Not holding transaction "
<< e.transaction->getID() << ": "
<< (e.local ? "local" : "network") << ", "
<< "result: " << e.result << " ledgers left: "
<< (ledgersLeft ? to_string(*ledgersLeft)
: "unspecified");
<< log::param(
"TransactionID",
to_string(e.transaction->getID()))
<< ": " << (e.local ? "local" : "network") << ", "
<< "result: " << log::param("Result", e.result)
<< " ledgers left: "
<< log::param(
"LedgersLeft",
ledgersLeft ? to_string(*ledgersLeft)
: "unspecified");
}
}
else
{
JLOG(m_journal.debug())
<< "Status other than success " << e.result;
<< "Status other than success " << e.result
<< log::field("TransactionID", e.transaction->getID());
e.transaction->setStatus(INVALID);
}
@@ -1896,15 +1921,19 @@ NetworkOPsImp::checkLastClosedLedger(
uint256 closedLedger = ourClosed->info().hash;
uint256 prevClosedLedger = ourClosed->info().parentHash;
JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;
JLOG(m_journal.trace())
<< "OurClosed: " << log::param("ClosedLedger", closedLedger);
JLOG(m_journal.trace())
<< "PrevClosed: "
<< log::param("PreviouslyClosedLedger", prevClosedLedger);
//-------------------------------------------------------------------------
// Determine preferred last closed ledger
auto& validations = app_.getValidations();
JLOG(m_journal.debug())
<< "ValidationTrie " << Json::Compact(validations.getJsonTrie());
<< "ValidationTrie "
<< log::param("ValidationTrie", validations.getJsonTrie());
// Will rely on peer LCL if no trusted validations exist
hash_map<uint256, std::uint32_t> peerCounts;

View File

@@ -24,7 +24,7 @@
#include <xrpld/app/ledger/Ledger.h>
#include <xrpld/core/JobQueue.h>
#include <xrpld/ledger/ReadView.h>
#include <xrpld/net/InfoSub.h>
#include <xrpld/rpc/InfoSub.h>
#include <xrpl/protocol/STValidation.h>
#include <xrpl/protocol/messages.h>

View File

@@ -33,7 +33,12 @@ WorkSSL::WorkSSL(
bool lastStatus,
callback_type cb)
: WorkBase(host, path, port, ios, lastEndpoint, lastStatus, cb)
, context_(config, j, boost::asio::ssl::context::tlsv12_client)
, context_(
config.SSL_VERIFY_DIR,
config.SSL_VERIFY_FILE,
config.SSL_VERIFY,
j,
boost::asio::ssl::context::tlsv12_client)
, stream_(socket_, context_.context())
{
auto ec = context_.preConnectVerify(stream_, host_);

View File

@@ -22,9 +22,9 @@
#include <xrpld/app/misc/detail/WorkBase.h>
#include <xrpld/core/Config.h>
#include <xrpld/net/HTTPClientSSLContext.h>
#include <xrpl/basics/contract.h>
#include <xrpl/net/HTTPClientSSLContext.h>
#include <boost/asio/ssl.hpp>
#include <boost/format.hpp>

View File

@@ -23,7 +23,7 @@
#include <xrpld/app/ledger/Ledger.h>
#include <xrpld/app/paths/Pathfinder.h>
#include <xrpld/app/paths/RippleLineCache.h>
#include <xrpld/net/InfoSub.h>
#include <xrpld/rpc/InfoSub.h>
#include <xrpl/basics/base_uint.h>
#include <xrpl/json/json_value.h>

View File

@@ -206,7 +206,12 @@ preflight2(PreflightContext const& ctx)
//------------------------------------------------------------------------------
Transactor::Transactor(ApplyContext& ctx)
: ctx_(ctx), j_(ctx.journal), account_(ctx.tx.getAccountID(sfAccount))
: ctx_(ctx)
, account_(ctx.tx.getAccountID(sfAccount))
, j_(ctx.journal,
log::attributes(
log::attr("TransactionID", to_string(ctx_.tx.getTransactionID())),
log::attr("AccountID", to_string(account_))))
{
}

View File

@@ -52,7 +52,10 @@ public:
, rules(rules_)
, flags(flags_)
, parentBatchId(parentBatchId_)
, j(j_)
, j(j_,
log::attributes(
log::attr("TransactionID", to_string(tx.getTransactionID())),
log::attr("AccountID", to_string(tx.getAccountID(sfAccount)))))
{
XRPL_ASSERT(
(flags_ & tapBATCH) == tapBATCH, "Batch apply flag should be set");
@@ -100,7 +103,10 @@ public:
, flags(flags_)
, tx(tx_)
, parentBatchId(parentBatchId_)
, j(j_)
, j(j_,
log::attributes(
log::attr("TransactionID", to_string(tx.getTransactionID())),
log::attr("AccountID", to_string(tx.getAccountID(sfAccount)))))
{
XRPL_ASSERT(
parentBatchId.has_value() == ((flags_ & tapBATCH) == tapBATCH),
@@ -138,12 +144,13 @@ class Transactor
{
protected:
ApplyContext& ctx_;
beast::Journal const j_;
AccountID const account_;
XRPAmount mPriorBalance; // Balance before fees.
XRPAmount mSourceBalance; // Balance after fees.
beast::Journal const j_;
virtual ~Transactor() = default;
Transactor(Transactor const&) = delete;
Transactor&

View File

@@ -180,6 +180,13 @@ public:
}
}
/** Wrap the passed closure with a reference counter, unconditionally.

    @param closure Closure to wrap; perfectly forwarded into the returned
                   Substitute.
    @return A Substitute holding *this (the counter) and the forwarded
            closure.

    NOTE(review): unlike the neighboring wrap() facility ("Wrap the passed
    closure with a reference counter"), no stop/join guard is visible here,
    so this appears to always hand back a counted closure — presumably for
    callers that must not be refused after shutdown begins; confirm
    intended use against the full class definition.
*/
template <class Closure>
Substitute<Closure>
forceWrap(Closure&& closure)
{
return {*this, std::forward<Closure>(closure)};
}
/** Wrap the passed closure with a reference counter.
@param closure Closure that accepts Args_t parameters and returns Ret_t.

View File

@@ -77,6 +77,16 @@ struct FeeSetup
* values.) */
};
/**
 * Output style of the logging subsystem.
 *
 * We support producing plain text logs and structured json logs.
 *
 * The enum is wrapped in a same-named namespace (rather than using
 * `enum class`) so enumerators are scoped as LogStyle::LogFmt /
 * LogStyle::Json while remaining implicitly convertible.
 */
namespace LogStyle {
enum LogStyle { LogFmt, Json };

/** Parse a configuration value: the exact string "json" selects Json;
    any other input yields the LogFmt default. */
LogStyle
fromString(std::string const&);
};  // namespace LogStyle
// This entire derived class is deprecated.
// For new config information use the style implied
// in the base class. For existing config information
@@ -299,6 +309,9 @@ public:
std::optional<std::size_t> VALIDATOR_LIST_THRESHOLD;
// Set it to LogStyle::Json to get structured json logs.
LogStyle::LogStyle LOG_STYLE = LogStyle::LogFmt;
public:
Config();

View File

@@ -48,6 +48,7 @@ struct ConfigSection
#define SECTION_CLUSTER_NODES "cluster_nodes"
#define SECTION_COMPRESSION "compression"
#define SECTION_DEBUG_LOGFILE "debug_logfile"
#define SECTION_LOG_STYLE "log_style"
#define SECTION_ELB_SUPPORT "elb_support"
#define SECTION_FEE_DEFAULT "fee_default"
#define SECTION_FETCH_DEPTH "fetch_depth"

View File

@@ -98,6 +98,11 @@ JobQueue::Coro::resume()
}
{
std::lock_guard lock(jq_.m_mutex);
XRPL_ASSERT(
jq_.nSuspend_ > 0,
"ripple::JobQueue::Coro::resume jq_.nSuspend_ should be greater "
"than 0");
--jq_.nSuspend_;
}
auto saved = detail::getLocalValues().release();
@@ -134,6 +139,11 @@ JobQueue::Coro::expectEarlyExit()
// That said, since we're outside the Coro's stack, we need to
// decrement the nSuspend that the Coro's call to yield caused.
std::lock_guard lock(jq_.m_mutex);
XRPL_ASSERT(
jq_.nSuspend_ > 0,
"ripple::JobQueue::Coro::expectEarlyExit() jq_.nSuspend_ should be "
"greater than 0");
--jq_.nSuspend_;
#ifndef NDEBUG
finished_ = true;

View File

@@ -19,7 +19,6 @@
#include <xrpld/core/Config.h>
#include <xrpld/core/ConfigSections.h>
#include <xrpld/net/HTTPClient.h>
#include <xrpl/basics/FileUtilities.h>
#include <xrpl/basics/Log.h>
@@ -27,6 +26,7 @@
#include <xrpl/basics/contract.h>
#include <xrpl/beast/core/LexicalCast.h>
#include <xrpl/json/json_reader.h>
#include <xrpl/net/HTTPClient.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/SystemParameters.h>
@@ -409,7 +409,8 @@ Config::setup(
legacy("database_path", boost::filesystem::absolute(dataDir).string());
}
HTTPClient::initializeSSLContext(*this, j_);
HTTPClient::initializeSSLContext(
this->SSL_VERIFY_DIR, this->SSL_VERIFY_FILE, this->SSL_VERIFY, j_);
if (RUN_STANDALONE)
LEDGER_HISTORY = 0;
@@ -689,6 +690,8 @@ Config::loadFromString(std::string const& fileContents)
if (getSingleSection(secConfig, SECTION_DEBUG_LOGFILE, strTemp, j_))
DEBUG_LOGFILE = strTemp;
LOG_STYLE = LogStyle::Json;
if (getSingleSection(secConfig, SECTION_SWEEP_INTERVAL, strTemp, j_))
{
SWEEP_INTERVAL = beast::lexicalCastThrow<std::size_t>(strTemp);
@@ -1077,6 +1080,14 @@ Config::loadFromString(std::string const& fileContents)
}
}
/** Map a configuration string onto a LogStyle value.

    Only the exact, case-sensitive string "json" selects structured JSON
    output; every other input (including the empty string) falls back to
    the plain-text LogFmt default.
*/
LogStyle::LogStyle
LogStyle::fromString(std::string const& str)
{
    return str == "json" ? Json : LogFmt;
}
boost::filesystem::path
Config::getDebugLogFile() const
{

View File

@@ -304,9 +304,10 @@ JobQueue::stop()
// but there may still be some threads between the return of
// `Job::doJob` and the return of `JobQueue::processTask`. That is why
// we must wait on the condition variable to make these assertions.
std::unique_lock<std::mutex> lock(m_mutex);
cv_.wait(
lock, [this] { return m_processCount == 0 && m_jobSet.empty(); });
std::unique_lock lock(m_mutex);
cv_.wait(lock, [this] {
return m_processCount == 0 && nSuspend_ == 0 && m_jobSet.empty();
});
XRPL_ASSERT(
m_processCount == 0,
"ripple::JobQueue::stop : all processes completed");

View File

@@ -23,9 +23,6 @@
#include <xrpl/beast/utility/instrumentation.h>
#include <xrpl/protocol/Protocol.h>
#include <limits>
#include <type_traits>
namespace ripple {
std::optional<std::uint64_t>
@@ -95,21 +92,8 @@ ApplyView::dirAdd(
return page;
}
// We rely on modulo arithmetic of unsigned integers (guaranteed in
// [basic.fundamental] paragraph 2) to detect page representation overflow.
// For signed integers this would be UB, hence static_assert here.
static_assert(std::is_unsigned_v<decltype(page)>);
// Defensive check against breaking changes in compiler.
static_assert([]<typename T>(std::type_identity<T>) constexpr -> T {
T tmp = std::numeric_limits<T>::max();
return ++tmp;
}(std::type_identity<decltype(page)>{}) == 0);
++page;
// Check whether we're out of pages.
if (page == 0)
return std::nullopt;
if (!rules().enabled(fixDirectoryLimit) &&
page >= dirNodeMaxPages) // Old pages limit
if (++page >= dirNodeMaxPages)
return std::nullopt;
// We are about to create a new node; we'll link it to

View File

@@ -39,8 +39,7 @@ ConnectAttempt::ConnectAttempt(
: Child(overlay)
, app_(app)
, id_(id)
, sink_(journal, OverlayImpl::makePrefix(id))
, journal_(sink_)
, journal_(journal, log::attributes(log::attr("NodeID", id)))
, remote_endpoint_(remote_endpoint)
, usage_(usage)
, strand_(io_service)

View File

@@ -46,7 +46,6 @@ private:
Application& app_;
std::uint32_t const id_;
beast::WrappedSink sink_;
beast::Journal const journal_;
endpoint_type remote_endpoint_;
Resource::Consumer usage_;

View File

@@ -165,8 +165,8 @@ OverlayImpl::onHandoff(
endpoint_type remote_endpoint)
{
auto const id = next_id_++;
beast::WrappedSink sink(app_.logs()["Peer"], makePrefix(id));
beast::Journal journal(sink);
auto journal =
app_.journal("Peer", log::attributes(log::attr("NodeID", id)));
Handoff handoff;
if (processRequest(request, handoff))
@@ -332,14 +332,6 @@ OverlayImpl::isPeerUpgrade(http_request_type const& request)
return !versions.empty();
}
/** Render a peer id as the bracketed log prefix "[NNN] ".

    The id is zero-padded to three digits (e.g. 7 -> "[007] "); ids with
    more than three digits are printed in full, not truncated.
*/
std::string
OverlayImpl::makePrefix(std::uint32_t id)
{
    std::ostringstream prefix;
    prefix << '[' << std::setw(3) << std::setfill('0') << id << "] ";
    return prefix.str();
}
std::shared_ptr<Writer>
OverlayImpl::makeRedirectResponse(
std::shared_ptr<PeerFinder::Slot> const& slot,

View File

@@ -341,9 +341,6 @@ public:
return true;
}
static std::string
makePrefix(std::uint32_t id);
void
reportInboundTraffic(TrafficCount::category cat, int bytes);

View File

@@ -77,10 +77,22 @@ PeerImp::PeerImp(
: Child(overlay)
, app_(app)
, id_(id)
, sink_(app_.journal("Peer"), makePrefix(id))
, p_sink_(app_.journal("Protocol"), makePrefix(id))
, journal_(sink_)
, p_journal_(p_sink_)
, journal_(
app_.journal("Peer"),
log::attributes(
log::attr("NodeID", id),
log::attr("RemoteAddress", to_string(slot->remote_endpoint())),
log::attr(
"PublicKey",
toBase58(TokenType::NodePublic, publicKey))))
, p_journal_(
app_.journal("Protocol"),
log::attributes(
log::attr("NodeID", id),
log::attr("RemoteAddress", to_string(slot->remote_endpoint())),
log::attr(
"PublicKey",
toBase58(TokenType::NodePublic, publicKey))))
, stream_ptr_(std::move(stream_ptr))
, socket_(stream_ptr_->next_layer().socket())
, stream_(*stream_ptr_)
@@ -313,7 +325,8 @@ PeerImp::sendTxQueue()
std::for_each(txQueue_.begin(), txQueue_.end(), [&](auto const& hash) {
ht.add_hashes(hash.data(), hash.size());
});
JLOG(p_journal_.trace()) << "sendTxQueue " << txQueue_.size();
JLOG(p_journal_.trace())
<< "sendTxQueue " << log::param("TxQueueSize", txQueue_.size());
txQueue_.clear();
send(std::make_shared<Message>(ht, protocol::mtHAVE_TRANSACTIONS));
}
@@ -333,7 +346,8 @@ PeerImp::addTxQueue(uint256 const& hash)
}
txQueue_.insert(hash);
JLOG(p_journal_.trace()) << "addTxQueue " << txQueue_.size();
JLOG(p_journal_.trace())
<< "addTxQueue " << log::param("TxQueueSize", txQueue_.size());
}
void
@@ -345,7 +359,8 @@ PeerImp::removeTxQueue(uint256 const& hash)
std::bind(&PeerImp::removeTxQueue, shared_from_this(), hash));
auto removed = txQueue_.erase(hash);
JLOG(p_journal_.trace()) << "removeTxQueue " << removed;
JLOG(p_journal_.trace())
<< "removeTxQueue " << log::param("ElementsRemoved", removed);
}
void
@@ -486,7 +501,8 @@ PeerImp::json()
default:
JLOG(p_journal_.warn())
<< "Unknown status: " << last_status.newstatus();
<< "Unknown status: "
<< log::param("NodeStatus", last_status.newstatus());
}
}
@@ -609,8 +625,10 @@ PeerImp::fail(std::string const& reason)
if (journal_.active(beast::severities::kWarning) && socket_.is_open())
{
std::string const n = name();
JLOG(journal_.warn()) << (n.empty() ? remote_address_.to_string() : n)
<< " failed: " << reason;
JLOG(journal_.warn())
<< log::param(
"RemoteAddress", n.empty() ? remote_address_.to_string() : n)
<< " failed: " << reason;
}
close();
}
@@ -624,8 +642,11 @@ PeerImp::fail(std::string const& name, error_code ec)
if (socket_.is_open())
{
JLOG(journal_.warn())
<< name << " from " << toBase58(TokenType::NodePublic, publicKey_)
<< " at " << remote_address_.to_string() << ": " << ec.message();
<< log::param("Name", name) << " from "
<< log::param(
"PublicKey", toBase58(TokenType::NodePublic, publicKey_))
<< " at " << log::param("RemoteAddress", remote_address_) << ": "
<< log::param("ErrorMessage", ec.message());
}
close();
}
@@ -659,7 +680,8 @@ PeerImp::setTimer()
if (ec)
{
JLOG(journal_.error()) << "setTimer: " << ec.message();
JLOG(journal_.error())
<< "setTimer: " << log::param("ErrorMessage", ec.message());
return;
}
timer_.async_wait(bind_executor(
@@ -678,14 +700,6 @@ PeerImp::cancelTimer()
//------------------------------------------------------------------------------
/** Build the zero-padded log prefix "[NNN] " for a peer id.

    Pads to three digits with '0' (e.g. 42 -> "[042] "); larger ids are
    emitted in full.
*/
std::string
PeerImp::makePrefix(id_t id)
{
    std::ostringstream out;
    out << std::setfill('0');
    out << '[' << std::setw(3) << id << "] ";
    return out.str();
}
void
PeerImp::onTimer(error_code const& ec)
{
@@ -698,7 +712,8 @@ PeerImp::onTimer(error_code const& ec)
if (ec)
{
// This should never happen
JLOG(journal_.error()) << "onTimer: " << ec.message();
JLOG(journal_.error())
<< "onTimer: " << log::param("ErrorMessage", ec.message());
return close();
}
@@ -770,7 +785,8 @@ PeerImp::doAccept()
read_buffer_.size() == 0,
"ripple::PeerImp::doAccept : empty read buffer");
JLOG(journal_.debug()) << "doAccept: " << remote_address_;
JLOG(journal_.debug()) << "doAccept: "
<< log::param("RemoteAddress", remote_address_);
auto const sharedValue = makeSharedValue(*stream_ptr_, journal_);
@@ -779,9 +795,12 @@ PeerImp::doAccept()
if (!sharedValue)
return fail("makeSharedValue: Unexpected failure");
JLOG(journal_.info()) << "Protocol: " << to_string(protocol_);
JLOG(journal_.info()) << "Protocol: "
<< log::param("Protocol", to_string(protocol_));
JLOG(journal_.info()) << "Public Key: "
<< toBase58(TokenType::NodePublic, publicKey_);
<< log::param(
"PublicKey",
toBase58(TokenType::NodePublic, publicKey_));
if (auto member = app_.cluster().member(publicKey_))
{
@@ -789,7 +808,8 @@ PeerImp::doAccept()
std::unique_lock lock{nameMutex_};
name_ = *member;
}
JLOG(journal_.info()) << "Cluster name: " << *member;
JLOG(journal_.info())
<< "Cluster name: " << log::param("ClusterName", *member);
}
overlay_.activate(shared_from_this());
@@ -1051,8 +1071,10 @@ PeerImp::onMessageBegin(
overlay_.addTxMetrics(
static_cast<MessageType>(type), static_cast<std::uint64_t>(size));
}
JLOG(journal_.trace()) << "onMessageBegin: " << type << " " << size << " "
<< uncompressed_size << " " << isCompressed;
JLOG(journal_.trace()) << "onMessageBegin: " << log::param("Type", type)
<< " " << log::param("Size", size) << " "
<< log::param("UncompressedSize", uncompressed_size)
<< " " << log::param("IsCompressed", isCompressed);
}
void
@@ -1219,8 +1241,9 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMEndpoints> const& m)
if (!result)
{
JLOG(p_journal_.error()) << "failed to parse incoming endpoint: {"
<< tm.endpoint() << "}";
JLOG(p_journal_.error())
<< "failed to parse incoming endpoint: {"
<< log::param("EndPoint", tm.endpoint()) << "}";
malformed++;
continue;
}
@@ -1283,13 +1306,20 @@ PeerImp::handleTransaction(
{
auto stx = std::make_shared<STTx const>(sit);
uint256 txID = stx->getTransactionID();
beast::Journal protocolJournal{
p_journal_,
log::attributes(
log::attr("TransactionID", to_string(txID)),
log::attr("RawTransaction", strHex(m->rawtransaction())))};
// Charge strongly for attempting to relay a txn with tfInnerBatchTxn
// LCOV_EXCL_START
if (stx->isFlag(tfInnerBatchTxn))
if (stx->isFlag(tfInnerBatchTxn) &&
getCurrentTransactionRules()->enabled(featureBatch))
{
JLOG(p_journal_.warn()) << "Ignoring Network relayed Tx containing "
"tfInnerBatchTxn (handleTransaction).";
JLOG(protocolJournal.warn())
<< "Ignoring Network relayed Tx containing "
"tfInnerBatchTxn (handleTransaction).";
fee_.update(Resource::feeModerateBurdenPeer, "inner batch txn");
return;
}
@@ -1304,7 +1334,8 @@ PeerImp::handleTransaction(
if (any(flags & HashRouterFlags::BAD))
{
fee_.update(Resource::feeUselessData, "known bad");
JLOG(p_journal_.debug()) << "Ignoring known bad tx " << txID;
JLOG(protocolJournal.debug())
<< "Ignoring known bad tx " << txID;
}
// Erase only if the server has seen this tx. If the server has not
@@ -1319,7 +1350,7 @@ PeerImp::handleTransaction(
return;
}
JLOG(p_journal_.debug()) << "Got tx " << txID;
JLOG(protocolJournal.debug()) << "Got tx " << txID;
bool checkSignature = true;
if (cluster())
@@ -1343,7 +1374,7 @@ PeerImp::handleTransaction(
if (app_.getLedgerMaster().getValidatedLedgerAge() > 4min)
{
JLOG(p_journal_.trace())
JLOG(protocolJournal.trace())
<< "No new transactions until synchronized";
}
else if (
@@ -1351,7 +1382,7 @@ PeerImp::handleTransaction(
app_.config().MAX_TRANSACTIONS)
{
overlay_.incJqTransOverflow();
JLOG(p_journal_.info()) << "Transaction queue is full";
JLOG(protocolJournal.info()) << "Transaction queue is full";
}
else
{
@@ -1373,7 +1404,7 @@ PeerImp::handleTransaction(
{
JLOG(p_journal_.warn())
<< "Transaction invalid: " << strHex(m->rawtransaction())
<< ". Exception: " << ex.what();
<< ". Exception: " << log::param("Reason", ex.what());
}
}
@@ -1382,7 +1413,8 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMGetLedger> const& m)
{
auto badData = [&](std::string const& msg) {
fee_.update(Resource::feeInvalidData, "get_ledger " + msg);
JLOG(p_journal_.warn()) << "TMGetLedger: " << msg;
JLOG(p_journal_.warn())
<< "TMGetLedger: " << log::param("PeerMessage", msg);
};
auto const itype{m->itype()};
@@ -1581,7 +1613,8 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMLedgerData> const& m)
{
auto badData = [&](std::string const& msg) {
fee_.update(Resource::feeInvalidData, msg);
JLOG(p_journal_.warn()) << "TMLedgerData: " << msg;
JLOG(p_journal_.warn())
<< "TMLedgerData: " << log::param("PeerMessage", msg);
};
// Verify ledger hash
@@ -1863,7 +1896,9 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMStatusChange> const& m)
}
if (peerChangedLedgers)
{
JLOG(p_journal_.debug()) << "LCL is " << closedLedgerHash;
JLOG(p_journal_.debug())
<< "LCL is "
<< log::param("ClosedLedgerHash", closedLedgerHash);
}
else
{
@@ -2850,7 +2885,8 @@ PeerImp::checkTransaction(
{
// charge strongly for relaying batch txns
// LCOV_EXCL_START
if (stx->isFlag(tfInnerBatchTxn))
if (stx->isFlag(tfInnerBatchTxn) &&
getCurrentTransactionRules()->enabled(featureBatch))
{
JLOG(p_journal_.warn()) << "Ignoring Network relayed Tx containing "
"tfInnerBatchTxn (checkSignature).";
@@ -2864,9 +2900,6 @@ PeerImp::checkTransaction(
(stx->getFieldU32(sfLastLedgerSequence) <
app_.getLedgerMaster().getValidLedgerIndex()))
{
JLOG(p_journal_.info())
<< "Marking transaction " << stx->getTransactionID()
<< "as BAD because it's expired";
app_.getHashRouter().setFlags(
stx->getTransactionID(), HashRouterFlags::BAD);
charge(Resource::feeUselessData, "expired tx");
@@ -2923,7 +2956,7 @@ PeerImp::checkTransaction(
{
if (!validReason.empty())
{
JLOG(p_journal_.debug())
JLOG(p_journal_.trace())
<< "Exception checking transaction: " << validReason;
}
@@ -2950,7 +2983,7 @@ PeerImp::checkTransaction(
{
if (!reason.empty())
{
JLOG(p_journal_.debug())
JLOG(p_journal_.trace())
<< "Exception checking transaction: " << reason;
}
app_.getHashRouter().setFlags(

View File

@@ -71,8 +71,6 @@ private:
Application& app_;
id_t const id_;
beast::WrappedSink sink_;
beast::WrappedSink p_sink_;
beast::Journal const journal_;
beast::Journal const p_journal_;
std::unique_ptr<stream_type> stream_ptr_;
@@ -456,9 +454,6 @@ private:
void
cancelTimer();
static std::string
makePrefix(id_t id);
// Called when the timer wait completes
void
onTimer(boost::system::error_code const& ec);
@@ -662,10 +657,22 @@ PeerImp::PeerImp(
: Child(overlay)
, app_(app)
, id_(id)
, sink_(app_.journal("Peer"), makePrefix(id))
, p_sink_(app_.journal("Protocol"), makePrefix(id))
, journal_(sink_)
, p_journal_(p_sink_)
, journal_(
app_.journal("Peer"),
log::attributes(
log::attr("NodeID", id),
log::attr("RemoteAddress", to_string(slot->remote_endpoint())),
log::attr(
"PublicKey",
toBase58(TokenType::NodePublic, publicKey))))
, p_journal_(
app_.journal("Protocol"),
log::attributes(
log::attr("NodeID", id),
log::attr("RemoteAddress", to_string(slot->remote_endpoint())),
log::attr(
"PublicKey",
toBase58(TokenType::NodePublic, publicKey))))
, stream_ptr_(std::move(stream_ptr))
, socket_(stream_ptr_->next_layer().socket())
, stream_(*stream_ptr_)

View File

@@ -21,7 +21,7 @@
#define RIPPLE_RPC_CONTEXT_H_INCLUDED
#include <xrpld/core/JobQueue.h>
#include <xrpld/net/InfoSub.h>
#include <xrpld/rpc/InfoSub.h>
#include <xrpld/rpc/Role.h>
#include <xrpl/beast/utility/Journal.h>

View File

@@ -21,7 +21,7 @@
#define RIPPLE_NET_RPCSUB_H_INCLUDED
#include <xrpld/core/JobQueue.h>
#include <xrpld/net/InfoSub.h>
#include <xrpld/rpc/InfoSub.h>
#include <boost/asio/io_service.hpp>

View File

@@ -17,7 +17,7 @@
*/
//==============================================================================
#include <xrpld/net/InfoSub.h>
#include <xrpld/rpc/InfoSub.h>
namespace ripple {

View File

@@ -17,12 +17,8 @@
*/
//==============================================================================
#include <xrpld/app/main/Application.h>
#include <xrpld/core/Config.h>
#include <xrpld/net/HTTPClient.h>
#include <xrpld/net/RPCCall.h>
#include <xrpld/rpc/RPCCall.h>
#include <xrpld/rpc/ServerHandler.h>
#include <xrpld/rpc/detail/RPCHelpers.h>
#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/basics/Log.h>
@@ -33,7 +29,10 @@
#include <xrpl/json/json_forwards.h>
#include <xrpl/json/json_reader.h>
#include <xrpl/json/to_string.h>
#include <xrpl/net/HTTPClient.h>
#include <xrpl/protocol/ApiVersion.h>
#include <xrpl/protocol/ErrorCodes.h>
#include <xrpl/protocol/PublicKey.h>
#include <xrpl/protocol/RPCErr.h>
#include <xrpl/protocol/SystemParameters.h>
#include <xrpl/protocol/UintTypes.h>
@@ -160,7 +159,7 @@ private:
std::string const& strPk,
TokenType type = TokenType::AccountPublic)
{
if (parseBase58<PublicKey>(type, strPk))
if (parseBase58<ripple::PublicKey>(type, strPk))
return true;
auto pkHex = strUnHex(strPk);
@@ -1508,7 +1507,7 @@ rpcClient(
}
else
{
ServerHandler::Setup setup;
ripple::ServerHandler::Setup setup;
try
{
setup = setup_ServerHandler(

View File

@@ -24,9 +24,9 @@
#include <xrpld/app/misc/NetworkOPs.h>
#include <xrpld/core/Config.h>
#include <xrpld/core/JobQueue.h>
#include <xrpld/net/InfoSub.h>
#include <xrpld/perflog/PerfLog.h>
#include <xrpld/rpc/Context.h>
#include <xrpld/rpc/InfoSub.h>
#include <xrpld/rpc/RPCHandler.h>
#include <xrpld/rpc/Role.h>
#include <xrpld/rpc/detail/Handler.h>

View File

@@ -17,8 +17,8 @@
*/
//==============================================================================
#include <xrpld/net/RPCCall.h>
#include <xrpld/net/RPCSub.h>
#include <xrpld/rpc/RPCCall.h>
#include <xrpld/rpc/RPCSub.h>
#include <xrpl/basics/Log.h>
#include <xrpl/basics/StringUtilities.h>

View File

@@ -20,7 +20,7 @@
#ifndef RIPPLE_RPC_WSINFOSUB_H
#define RIPPLE_RPC_WSINFOSUB_H
#include <xrpld/net/InfoSub.h>
#include <xrpld/rpc/InfoSub.h>
#include <xrpld/rpc/Role.h>
#include <xrpl/beast/net/IPAddressConversion.h>

View File

@@ -114,7 +114,7 @@ getCountsJson(Application& app, int minObjectCount)
ret[jss::treenode_cache_size] =
app.getNodeFamily().getTreeNodeCache()->getCacheSize();
ret[jss::treenode_track_size] =
app.getNodeFamily().getTreeNodeCache()->getTrackSize();
static_cast<int>(app.getNodeFamily().getTreeNodeCache()->size());
std::string uptime;
auto s = UptimeClock::now();

View File

@@ -21,8 +21,8 @@
#include <xrpld/app/main/Application.h>
#include <xrpld/app/misc/NetworkOPs.h>
#include <xrpld/ledger/ReadView.h>
#include <xrpld/net/RPCSub.h>
#include <xrpld/rpc/Context.h>
#include <xrpld/rpc/RPCSub.h>
#include <xrpld/rpc/Role.h>
#include <xrpld/rpc/detail/RPCHelpers.h>