Compare commits

...

5 Commits

Author SHA1 Message Date
Bart
3a8a18c2ca refactor: Use uint256 directly as key instead of void pointer (#6313)
This change replaces `void const*` by `uint256 const&` for database fetches.

Object hashes are expressed using the `uint256` data type, and are converted to `void *` when calling the `fetch` or `fetchBatch` functions. However, in these fetch functions they are converted back to `uint256`, making the conversion process unnecessary. In a few cases the underlying pointer is needed, but it can then be easily obtained via `[hash variable].data()`.
2026-02-25 18:23:34 -05:00
Ayaz Salikhov
65e63ebef3 chore: Update cleanup-workspace to delete old .conan2 dir on macOS (#6412) 2026-02-25 01:12:16 +00:00
Valentin Balaschenko
bdd106d992 Explicitly trim the heap after cache sweeps (#6022)
Limited to Linux/glibc builds.
2026-02-24 21:33:13 +00:00
Valentin Balaschenko
24cbaf76a5 ci: Update prepare-runner action to fix macOS build environment (empty)
Updates XRPLF/actions prepare-runner to version 2cbf48101 which fixes
pip upgrade failures on macOS runners with Homebrew-managed Python.

* This commit was cherry-picked from "release-3.1", but ended up empty
  because the changes are already present. It is included only for
  accounting - to indicate that all changes/commits from the previous
  release will be in the next one.
2026-02-24 12:52:32 -05:00
Valentin Balaschenko
3a805cc646 Disable featureBatch and fixBatchInnerSigs amendments (#6402) 2026-02-24 12:49:59 -05:00
16 changed files with 481 additions and 49 deletions

View File

@@ -101,7 +101,7 @@ jobs:
steps: steps:
- name: Cleanup workspace (macOS and Windows) - name: Cleanup workspace (macOS and Windows)
if: ${{ runner.os == 'macOS' || runner.os == 'Windows' }} if: ${{ runner.os == 'macOS' || runner.os == 'Windows' }}
uses: XRPLF/actions/cleanup-workspace@cf0433aa74563aead044a1e395610c96d65a37cf uses: XRPLF/actions/cleanup-workspace@c7d9ce5ebb03c752a354889ecd870cadfc2b1cd4
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2

View File

@@ -64,7 +64,7 @@ jobs:
steps: steps:
- name: Cleanup workspace (macOS and Windows) - name: Cleanup workspace (macOS and Windows)
if: ${{ runner.os == 'macOS' || runner.os == 'Windows' }} if: ${{ runner.os == 'macOS' || runner.os == 'Windows' }}
uses: XRPLF/actions/cleanup-workspace@cf0433aa74563aead044a1e395610c96d65a37cf uses: XRPLF/actions/cleanup-workspace@c7d9ce5ebb03c752a354889ecd870cadfc2b1cd4
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2

View File

@@ -0,0 +1,73 @@
#pragma once
#include <xrpl/beast/utility/Journal.h>
#include <chrono>
#include <cstdint>
#include <string_view>
namespace xrpl {
// cSpell:ignore ptmalloc
// -----------------------------------------------------------------------------
// Allocator interaction note:
// - This facility invokes glibc's malloc_trim(0) on Linux/glibc to request that
// ptmalloc return free heap pages to the OS.
// - If an alternative allocator (e.g. jemalloc or tcmalloc) is linked or
// preloaded (LD_PRELOAD), calling glibc's malloc_trim typically has no effect
// on the *active* heap. The call is harmless but may not reclaim memory
// because those allocators manage their own arenas.
// - Only glibc sbrk/arena space is eligible for trimming; large mmap-backed
// allocations are usually returned to the OS on free regardless of trimming.
// - Call at known reclamation points (e.g., after cache sweeps / online delete)
// and consider rate limiting to avoid churn.
// -----------------------------------------------------------------------------
/** Metrics collected around a single malloc_trim attempt.
 *
 * All numeric fields default to -1, which acts as a "not measured" sentinel;
 * `supported` defaults to false until a platform that can trim is detected.
 */
struct MallocTrimReport
{
    bool supported{false};
    int trimResult{-1};
    std::int64_t rssBeforeKB{-1};
    std::int64_t rssAfterKB{-1};
    std::chrono::microseconds durationUs{-1};
    std::int64_t minfltDelta{-1};
    std::int64_t majfltDelta{-1};

    /** RSS change across the trim, in KB (negative means memory released).
     *
     * Returns 0 whenever either sample is still the -1 sentinel, so an
     * unmeasured report never looks like a real delta.
     */
    [[nodiscard]] std::int64_t
    deltaKB() const noexcept
    {
        bool const bothMeasured = rssBeforeKB >= 0 && rssAfterKB >= 0;
        return bothMeasured ? rssAfterKB - rssBeforeKB : 0;
    }
};
/**
* @brief Attempt to return freed memory to the operating system.
*
* On Linux with glibc malloc, this issues ::malloc_trim(0), which may release
* free space from ptmalloc arenas back to the kernel. On other platforms, or if
* a different allocator is in use, this function is a no-op and the report will
* indicate that trimming is unsupported or had no effect.
*
* @param tag Identifier for logging/debugging purposes.
* @param journal Journal for diagnostic logging.
* @return Report containing before/after metrics and the trim result.
*
* @note If an alternative allocator (jemalloc/tcmalloc) is linked or preloaded,
* calling glibc's malloc_trim may have no effect on the active heap. The
* call is harmless but typically does not reclaim memory under those
* allocators.
*
* @note Only memory served from glibc's sbrk/arena heaps is eligible for trim.
* Large allocations satisfied via mmap are usually returned on free
* independently of trimming.
*
* @note Intended for use after operations that free significant memory (e.g.,
* cache sweeps, ledger cleanup, online delete). Consider rate limiting.
*/
MallocTrimReport
mallocTrim(std::string_view tag, beast::Journal journal);
} // namespace xrpl

View File

@@ -77,16 +77,16 @@ public:
If the object is not found or an error is encountered, the If the object is not found or an error is encountered, the
result will indicate the condition. result will indicate the condition.
@note This will be called concurrently. @note This will be called concurrently.
@param key A pointer to the key data. @param hash The hash of the object.
@param pObject [out] The created object if successful. @param pObject [out] The created object if successful.
@return The result of the operation. @return The result of the operation.
*/ */
virtual Status virtual Status
fetch(void const* key, std::shared_ptr<NodeObject>* pObject) = 0; fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pObject) = 0;
/** Fetch a batch synchronously. */ /** Fetch a batch synchronously. */
virtual std::pair<std::vector<std::shared_ptr<NodeObject>>, Status> virtual std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
fetchBatch(std::vector<uint256 const*> const& hashes) = 0; fetchBatch(std::vector<uint256> const& hashes) = 0;
/** Store a single object. /** Store a single object.
Depending on the implementation this may happen immediately Depending on the implementation this may happen immediately

View File

@@ -15,9 +15,10 @@
// Add new amendments to the top of this list. // Add new amendments to the top of this list.
// Keep it sorted in reverse chronological order. // Keep it sorted in reverse chronological order.
XRPL_FIX (PermissionedDomainInvariant, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (PermissionedDomainInvariant, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (ExpiredNFTokenOfferRemoval, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (ExpiredNFTokenOfferRemoval, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (BatchInnerSigs, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (BatchInnerSigs, Supported::no, VoteBehavior::DefaultNo)
XRPL_FEATURE(LendingProtocol, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(LendingProtocol, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(PermissionDelegationV1_1, Supported::no, VoteBehavior::DefaultNo) XRPL_FEATURE(PermissionDelegationV1_1, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (DirectoryLimit, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (DirectoryLimit, Supported::yes, VoteBehavior::DefaultNo)
@@ -31,7 +32,7 @@ XRPL_FEATURE(TokenEscrow, Supported::yes, VoteBehavior::DefaultNo
XRPL_FIX (EnforceNFTokenTrustlineV2, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (EnforceNFTokenTrustlineV2, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (AMMv1_3, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (AMMv1_3, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(PermissionedDEX, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(PermissionedDEX, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(Batch, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(Batch, Supported::no, VoteBehavior::DefaultNo)
XRPL_FEATURE(SingleAssetVault, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(SingleAssetVault, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (PayChanCancelAfter, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (PayChanCancelAfter, Supported::yes, VoteBehavior::DefaultNo)
// Check flags in Credential transactions // Check flags in Credential transactions

View File

@@ -0,0 +1,157 @@
#include <xrpl/basics/Log.h>
#include <xrpl/basics/MallocTrim.h>
#include <boost/predef.h>
#include <chrono>
#include <cstdint>
#include <cstdio>
#include <fstream>
#include <sstream>
#if defined(__GLIBC__) && BOOST_OS_LINUX
#include <sys/resource.h>
#include <malloc.h>
#include <unistd.h>
// Require RUSAGE_THREAD for thread-scoped page fault tracking
#ifndef RUSAGE_THREAD
#error "MallocTrim rusage instrumentation requires RUSAGE_THREAD on Linux/glibc"
#endif
namespace {
// Snapshot per-thread resource usage (used here for the page-fault counters)
// for the calling thread. Returns true on success; `ru` is only valid when
// true is returned. RUSAGE_THREAD availability is enforced by the #error
// check above.
bool
getRusageThread(struct rusage& ru)
{
    return ::getrusage(RUSAGE_THREAD, &ru) == 0; // LCOV_EXCL_LINE
}
} // namespace
#endif
namespace xrpl {
namespace detail {
// cSpell:ignore statm
#if defined(__GLIBC__) && BOOST_OS_LINUX
// Thin forwarder to glibc ::malloc_trim. `padBytes` is the amount of free
// space to leave untrimmed at the top of the heap; the glibc return value is
// passed through unchanged.
inline int
mallocTrimWithPad(std::size_t padBytes)
{
    return ::malloc_trim(padBytes);
}
/** Extract the resident-set size, in kilobytes, from /proc/self/statm text.
 *
 * The statm fields are: size resident shared text lib data dt — all counted
 * in pages. Only the second field (resident) is consumed.
 *
 * @param statm Raw contents of /proc/self/statm.
 * @return RSS in KB, or -1 if parsing fails or the page size is unavailable.
 */
long
parseStatmRSSkB(std::string const& statm)
{
    std::istringstream in(statm);
    long totalPages = 0;
    long residentPages = 0;
    // Both leading fields must parse as numbers, else the input is malformed.
    if (!(in >> totalPages >> residentPages))
        return -1;
    long const bytesPerPage = ::sysconf(_SC_PAGESIZE);
    if (bytesPerPage <= 0)
        return -1;
    // resident is a page count: pages -> bytes -> kilobytes.
    return (residentPages * bytesPerPage) / 1024;
}
#endif // __GLIBC__ && BOOST_OS_LINUX
} // namespace detail
MallocTrimReport
mallocTrim(std::string_view tag, beast::Journal journal)
{
    // LCOV_EXCL_START
    MallocTrimReport report;
#if !(defined(__GLIBC__) && BOOST_OS_LINUX)
    // Non-Linux / non-glibc builds: nothing to trim. The report keeps its
    // defaults (supported == false, -1 sentinels everywhere).
    JLOG(journal.debug()) << "malloc_trim not supported on this platform (tag=" << tag << ")";
#else
    // Keep glibc malloc_trim padding at 0 (default): 12h Mainnet tests across 0/256KB/1MB/16MB
    // showed no clear, consistent benefit from custom padding—0 provided the best overall balance
    // of RSS reduction and trim-latency stability without adding a tuning surface.
    constexpr std::size_t TRIM_PAD = 0;
    report.supported = true;
    // The /proc reads and rusage snapshots below exist only to feed the debug
    // log line, so the whole instrumentation path is skipped unless debug
    // logging is active.
    if (journal.debug())
    {
        auto readFile = [](std::string const& path) -> std::string {
            std::ifstream ifs(path, std::ios::in | std::ios::binary);
            if (!ifs.is_open())
                return {};
            // /proc files are often not seekable; read as a stream.
            std::ostringstream oss;
            oss << ifs.rdbuf();
            return oss.str();
        };
        std::string const tagStr{tag};
        std::string const statmPath = "/proc/self/statm";
        // Snapshot RSS, per-thread rusage, and a steady clock immediately
        // around the trim call so the deltas attribute to malloc_trim alone.
        auto const statmBefore = readFile(statmPath);
        long const rssBeforeKB = detail::parseStatmRSSkB(statmBefore);
        struct rusage ru0{};
        bool const have_ru0 = getRusageThread(ru0);
        auto const t0 = std::chrono::steady_clock::now();
        report.trimResult = detail::mallocTrimWithPad(TRIM_PAD);
        auto const t1 = std::chrono::steady_clock::now();
        struct rusage ru1{};
        bool const have_ru1 = getRusageThread(ru1);
        auto const statmAfter = readFile(statmPath);
        long const rssAfterKB = detail::parseStatmRSSkB(statmAfter);
        // Populate report fields
        report.rssBeforeKB = rssBeforeKB;
        report.rssAfterKB = rssAfterKB;
        report.durationUs = std::chrono::duration_cast<std::chrono::microseconds>(t1 - t0);
        // Page-fault deltas are only meaningful if both rusage snapshots
        // succeeded; otherwise the -1 sentinels remain.
        if (have_ru0 && have_ru1)
        {
            report.minfltDelta = ru1.ru_minflt - ru0.ru_minflt;
            report.majfltDelta = ru1.ru_majflt - ru0.ru_majflt;
        }
        // A -1 from parseStatmRSSkB marks a failed statm read/parse; treat
        // any failed side as "no delta" rather than reporting garbage.
        std::int64_t const deltaKB = (rssBeforeKB < 0 || rssAfterKB < 0)
            ? 0
            : (static_cast<std::int64_t>(rssAfterKB) - static_cast<std::int64_t>(rssBeforeKB));
        JLOG(journal.debug()) << "malloc_trim tag=" << tagStr << " result=" << report.trimResult
                              << " pad=" << TRIM_PAD << " bytes"
                              << " rss_before=" << rssBeforeKB << "kB"
                              << " rss_after=" << rssAfterKB << "kB"
                              << " delta=" << deltaKB << "kB"
                              << " duration_us=" << report.durationUs.count()
                              << " minflt_delta=" << report.minfltDelta
                              << " majflt_delta=" << report.majfltDelta;
    }
    else
    {
        // Debug logging inactive: perform the trim without instrumentation;
        // only trimResult and `supported` are filled in.
        report.trimResult = detail::mallocTrimWithPad(TRIM_PAD);
    }
#endif
    return report;
    // LCOV_EXCL_STOP
}
} // namespace xrpl

View File

@@ -33,7 +33,7 @@ DatabaseNodeImp::fetchNodeObject(
try try
{ {
status = backend_->fetch(hash.data(), &nodeObject); status = backend_->fetch(hash, &nodeObject);
} }
catch (std::exception const& e) catch (std::exception const& e)
{ {
@@ -68,18 +68,10 @@ DatabaseNodeImp::fetchBatch(std::vector<uint256> const& hashes)
using namespace std::chrono; using namespace std::chrono;
auto const before = steady_clock::now(); auto const before = steady_clock::now();
std::vector<uint256 const*> batch{};
batch.reserve(hashes.size());
for (size_t i = 0; i < hashes.size(); ++i)
{
auto const& hash = hashes[i];
batch.push_back(&hash);
}
// Get the node objects that match the hashes from the backend. To protect // Get the node objects that match the hashes from the backend. To protect
// against the backends returning fewer or more results than expected, the // against the backends returning fewer or more results than expected, the
// container is resized to the number of hashes. // container is resized to the number of hashes.
auto results = backend_->fetchBatch(batch).first; auto results = backend_->fetchBatch(hashes).first;
XRPL_ASSERT( XRPL_ASSERT(
results.size() == hashes.size() || results.empty(), results.size() == hashes.size() || results.empty(),
"number of output objects either matches number of input hashes or is empty"); "number of output objects either matches number of input hashes or is empty");

View File

@@ -105,7 +105,7 @@ DatabaseRotatingImp::fetchNodeObject(
std::shared_ptr<NodeObject> nodeObject; std::shared_ptr<NodeObject> nodeObject;
try try
{ {
status = backend->fetch(hash.data(), &nodeObject); status = backend->fetch(hash, &nodeObject);
} }
catch (std::exception const& e) catch (std::exception const& e)
{ {

View File

@@ -116,10 +116,9 @@ public:
//-------------------------------------------------------------------------- //--------------------------------------------------------------------------
Status Status
fetch(void const* key, std::shared_ptr<NodeObject>* pObject) override fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pObject) override
{ {
XRPL_ASSERT(db_, "xrpl::NodeStore::MemoryBackend::fetch : non-null database"); XRPL_ASSERT(db_, "xrpl::NodeStore::MemoryBackend::fetch : non-null database");
uint256 const hash(uint256::fromVoid(key));
std::lock_guard _(db_->mutex); std::lock_guard _(db_->mutex);
@@ -134,14 +133,14 @@ public:
} }
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status> std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
fetchBatch(std::vector<uint256 const*> const& hashes) override fetchBatch(std::vector<uint256> const& hashes) override
{ {
std::vector<std::shared_ptr<NodeObject>> results; std::vector<std::shared_ptr<NodeObject>> results;
results.reserve(hashes.size()); results.reserve(hashes.size());
for (auto const& h : hashes) for (auto const& h : hashes)
{ {
std::shared_ptr<NodeObject> nObj; std::shared_ptr<NodeObject> nObj;
Status status = fetch(h->begin(), &nObj); Status status = fetch(h, &nObj);
if (status != ok) if (status != ok)
results.push_back({}); results.push_back({});
else else

View File

@@ -179,17 +179,17 @@ public:
} }
Status Status
fetch(void const* key, std::shared_ptr<NodeObject>* pno) override fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pno) override
{ {
Status status; Status status;
pno->reset(); pno->reset();
nudb::error_code ec; nudb::error_code ec;
db_.fetch( db_.fetch(
key, hash.data(),
[key, pno, &status](void const* data, std::size_t size) { [&hash, pno, &status](void const* data, std::size_t size) {
nudb::detail::buffer bf; nudb::detail::buffer bf;
auto const result = nodeobject_decompress(data, size, bf); auto const result = nodeobject_decompress(data, size, bf);
DecodedBlob decoded(key, result.first, result.second); DecodedBlob decoded(hash.data(), result.first, result.second);
if (!decoded.wasOk()) if (!decoded.wasOk())
{ {
status = dataCorrupt; status = dataCorrupt;
@@ -207,14 +207,14 @@ public:
} }
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status> std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
fetchBatch(std::vector<uint256 const*> const& hashes) override fetchBatch(std::vector<uint256> const& hashes) override
{ {
std::vector<std::shared_ptr<NodeObject>> results; std::vector<std::shared_ptr<NodeObject>> results;
results.reserve(hashes.size()); results.reserve(hashes.size());
for (auto const& h : hashes) for (auto const& h : hashes)
{ {
std::shared_ptr<NodeObject> nObj; std::shared_ptr<NodeObject> nObj;
Status status = fetch(h->begin(), &nObj); Status status = fetch(h, &nObj);
if (status != ok) if (status != ok)
results.push_back({}); results.push_back({});
else else

View File

@@ -36,13 +36,13 @@ public:
} }
Status Status
fetch(void const*, std::shared_ptr<NodeObject>*) override fetch(uint256 const&, std::shared_ptr<NodeObject>*) override
{ {
return notFound; return notFound;
} }
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status> std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
fetchBatch(std::vector<uint256 const*> const& hashes) override fetchBatch(std::vector<uint256> const& hashes) override
{ {
return {}; return {};
} }

View File

@@ -244,7 +244,7 @@ public:
//-------------------------------------------------------------------------- //--------------------------------------------------------------------------
Status Status
fetch(void const* key, std::shared_ptr<NodeObject>* pObject) override fetch(uint256 const& hash, std::shared_ptr<NodeObject>* pObject) override
{ {
XRPL_ASSERT(m_db, "xrpl::NodeStore::RocksDBBackend::fetch : non-null database"); XRPL_ASSERT(m_db, "xrpl::NodeStore::RocksDBBackend::fetch : non-null database");
pObject->reset(); pObject->reset();
@@ -252,7 +252,7 @@ public:
Status status(ok); Status status(ok);
rocksdb::ReadOptions const options; rocksdb::ReadOptions const options;
rocksdb::Slice const slice(static_cast<char const*>(key), m_keyBytes); rocksdb::Slice const slice(std::bit_cast<char const*>(hash.data()), m_keyBytes);
std::string string; std::string string;
@@ -260,7 +260,7 @@ public:
if (getStatus.ok()) if (getStatus.ok())
{ {
DecodedBlob decoded(key, string.data(), string.size()); DecodedBlob decoded(hash.data(), string.data(), string.size());
if (decoded.wasOk()) if (decoded.wasOk())
{ {
@@ -295,14 +295,14 @@ public:
} }
std::pair<std::vector<std::shared_ptr<NodeObject>>, Status> std::pair<std::vector<std::shared_ptr<NodeObject>>, Status>
fetchBatch(std::vector<uint256 const*> const& hashes) override fetchBatch(std::vector<uint256> const& hashes) override
{ {
std::vector<std::shared_ptr<NodeObject>> results; std::vector<std::shared_ptr<NodeObject>> results;
results.reserve(hashes.size()); results.reserve(hashes.size());
for (auto const& h : hashes) for (auto const& h : hashes)
{ {
std::shared_ptr<NodeObject> nObj; std::shared_ptr<NodeObject> nObj;
Status status = fetch(h->begin(), &nObj); Status status = fetch(h, &nObj);
if (status != ok) if (status != ok)
results.push_back({}); results.push_back({});
else else
@@ -332,9 +332,8 @@ public:
EncodedBlob encoded(e); EncodedBlob encoded(e);
wb.Put( wb.Put(
rocksdb::Slice(reinterpret_cast<char const*>(encoded.getKey()), m_keyBytes), rocksdb::Slice(std::bit_cast<char const*>(encoded.getKey()), m_keyBytes),
rocksdb::Slice( rocksdb::Slice(std::bit_cast<char const*>(encoded.getData()), encoded.getSize()));
reinterpret_cast<char const*>(encoded.getData()), encoded.getSize()));
} }
rocksdb::WriteOptions const options; rocksdb::WriteOptions const options;

View File

@@ -138,7 +138,7 @@ public:
{ {
std::shared_ptr<NodeObject> object; std::shared_ptr<NodeObject> object;
Status const status = backend.fetch(batch[i]->getHash().cbegin(), &object); Status const status = backend.fetch(batch[i]->getHash(), &object);
BEAST_EXPECT(status == ok); BEAST_EXPECT(status == ok);
@@ -158,7 +158,7 @@ public:
{ {
std::shared_ptr<NodeObject> object; std::shared_ptr<NodeObject> object;
Status const status = backend.fetch(batch[i]->getHash().cbegin(), &object); Status const status = backend.fetch(batch[i]->getHash(), &object);
BEAST_EXPECT(status == notFound); BEAST_EXPECT(status == notFound);
} }

View File

@@ -314,7 +314,7 @@ public:
std::shared_ptr<NodeObject> obj; std::shared_ptr<NodeObject> obj;
std::shared_ptr<NodeObject> result; std::shared_ptr<NodeObject> result;
obj = seq1_.obj(dist_(gen_)); obj = seq1_.obj(dist_(gen_));
backend_.fetch(obj->getHash().data(), &result); backend_.fetch(obj->getHash(), &result);
suite_.expect(result && isSame(result, obj)); suite_.expect(result && isSame(result, obj));
} }
catch (std::exception const& e) catch (std::exception const& e)
@@ -377,9 +377,9 @@ public:
{ {
try try
{ {
auto const key = seq2_.key(i); auto const hash = seq2_.key(i);
std::shared_ptr<NodeObject> result; std::shared_ptr<NodeObject> result;
backend_.fetch(key.data(), &result); backend_.fetch(hash, &result);
suite_.expect(!result); suite_.expect(!result);
} }
catch (std::exception const& e) catch (std::exception const& e)
@@ -449,9 +449,9 @@ public:
{ {
if (rand_(gen_) < missingNodePercent) if (rand_(gen_) < missingNodePercent)
{ {
auto const key = seq2_.key(dist_(gen_)); auto const hash = seq2_.key(dist_(gen_));
std::shared_ptr<NodeObject> result; std::shared_ptr<NodeObject> result;
backend_.fetch(key.data(), &result); backend_.fetch(hash, &result);
suite_.expect(!result); suite_.expect(!result);
} }
else else
@@ -459,7 +459,7 @@ public:
std::shared_ptr<NodeObject> obj; std::shared_ptr<NodeObject> obj;
std::shared_ptr<NodeObject> result; std::shared_ptr<NodeObject> result;
obj = seq1_.obj(dist_(gen_)); obj = seq1_.obj(dist_(gen_));
backend_.fetch(obj->getHash().data(), &result); backend_.fetch(obj->getHash(), &result);
suite_.expect(result && isSame(result, obj)); suite_.expect(result && isSame(result, obj));
} }
} }
@@ -540,8 +540,7 @@ public:
std::shared_ptr<NodeObject> result; std::shared_ptr<NodeObject> result;
auto const j = older_(gen_); auto const j = older_(gen_);
obj = seq1_.obj(j); obj = seq1_.obj(j);
std::shared_ptr<NodeObject> result1; backend_.fetch(obj->getHash(), &result);
backend_.fetch(obj->getHash().data(), &result);
suite_.expect(result != nullptr); suite_.expect(result != nullptr);
suite_.expect(isSame(result, obj)); suite_.expect(isSame(result, obj));
} }
@@ -559,7 +558,7 @@ public:
std::shared_ptr<NodeObject> result; std::shared_ptr<NodeObject> result;
auto const j = recent_(gen_); auto const j = recent_(gen_);
obj = seq1_.obj(j); obj = seq1_.obj(j);
backend_.fetch(obj->getHash().data(), &result); backend_.fetch(obj->getHash(), &result);
suite_.expect(!result || isSame(result, obj)); suite_.expect(!result || isSame(result, obj));
break; break;
} }

View File

@@ -0,0 +1,209 @@
#include <xrpl/basics/MallocTrim.h>
#include <boost/predef.h>
#include <gtest/gtest.h>
using namespace xrpl;
// cSpell:ignore statm
#if defined(__GLIBC__) && BOOST_OS_LINUX
namespace xrpl::detail {
long
parseStatmRSSkB(std::string const& statm);
} // namespace xrpl::detail
#endif
// Verifies MallocTrimReport's default-constructed sentinel values and the
// deltaKB() arithmetic for the freed / increased / unchanged RSS cases.
TEST(MallocTrimReport, structure)
{
    // Test default construction
    MallocTrimReport report;
    EXPECT_EQ(report.supported, false);
    EXPECT_EQ(report.trimResult, -1);
    EXPECT_EQ(report.rssBeforeKB, -1);
    EXPECT_EQ(report.rssAfterKB, -1);
    EXPECT_EQ(report.durationUs, std::chrono::microseconds{-1});
    EXPECT_EQ(report.minfltDelta, -1);
    EXPECT_EQ(report.majfltDelta, -1);
    // While either RSS field is still the -1 sentinel, deltaKB() must be 0.
    EXPECT_EQ(report.deltaKB(), 0);
    // Test deltaKB calculation - memory freed
    report.rssBeforeKB = 1000;
    report.rssAfterKB = 800;
    EXPECT_EQ(report.deltaKB(), -200);
    // Test deltaKB calculation - memory increased
    report.rssBeforeKB = 500;
    report.rssAfterKB = 600;
    EXPECT_EQ(report.deltaKB(), 100);
    // Test deltaKB calculation - no change
    report.rssBeforeKB = 1234;
    report.rssAfterKB = 1234;
    EXPECT_EQ(report.deltaKB(), 0);
}
#if defined(__GLIBC__) && BOOST_OS_LINUX
// Exercises the /proc/self/statm parser with well-formed inputs (various
// whitespace styles) and malformed inputs, which must all yield -1.
// Valid-input expectations use EXPECT_GT/EXPECT_EQ rather than exact values
// because the result scales with the runtime page size.
TEST(parseStatmRSSkB, standard_format)
{
    using xrpl::detail::parseStatmRSSkB;
    // Test standard format: size resident shared text lib data dt
    // Assuming 4KB page size: resident=1000 pages = 4000 KB
    {
        std::string statm = "25365 1000 2377 0 0 5623 0";
        long result = parseStatmRSSkB(statm);
        // Note: actual result depends on system page size
        // On most systems it's 4KB, so 1000 pages = 4000 KB
        EXPECT_GT(result, 0);
    }
    // Test with newline
    {
        std::string statm = "12345 2000 1234 0 0 3456 0\n";
        long result = parseStatmRSSkB(statm);
        EXPECT_GT(result, 0);
    }
    // Test with tabs
    {
        std::string statm = "12345\t2000\t1234\t0\t0\t3456\t0";
        long result = parseStatmRSSkB(statm);
        EXPECT_GT(result, 0);
    }
    // Test zero resident pages
    {
        std::string statm = "25365 0 2377 0 0 5623 0";
        long result = parseStatmRSSkB(statm);
        EXPECT_EQ(result, 0);
    }
    // Test with extra whitespace
    {
        std::string statm = "  25365   1000   2377  ";
        long result = parseStatmRSSkB(statm);
        EXPECT_GT(result, 0);
    }
    // Test empty string
    {
        std::string statm = "";
        long result = parseStatmRSSkB(statm);
        EXPECT_EQ(result, -1);
    }
    // Test malformed data (only one field)
    {
        std::string statm = "25365";
        long result = parseStatmRSSkB(statm);
        EXPECT_EQ(result, -1);
    }
    // Test malformed data (non-numeric)
    {
        std::string statm = "abc def ghi";
        long result = parseStatmRSSkB(statm);
        EXPECT_EQ(result, -1);
    }
    // Test malformed data (second field non-numeric)
    {
        std::string statm = "25365 abc 2377";
        long result = parseStatmRSSkB(statm);
        EXPECT_EQ(result, -1);
    }
}
#endif
// With a null sink (debug logging inactive), mallocTrim must skip the
// RSS/rusage instrumentation path: duration and fault deltas stay at their
// -1 sentinels even on supported platforms.
TEST(mallocTrim, without_debug_logging)
{
    beast::Journal journal{beast::Journal::getNullSink()};
    MallocTrimReport report = mallocTrim("without_debug", journal);
#if defined(__GLIBC__) && BOOST_OS_LINUX
    EXPECT_EQ(report.supported, true);
    EXPECT_GE(report.trimResult, 0);
    EXPECT_EQ(report.durationUs, std::chrono::microseconds{-1});
    EXPECT_EQ(report.minfltDelta, -1);
    EXPECT_EQ(report.majfltDelta, -1);
#else
    // Unsupported platform: the report must remain all defaults.
    EXPECT_EQ(report.supported, false);
    EXPECT_EQ(report.trimResult, -1);
    EXPECT_EQ(report.rssBeforeKB, -1);
    EXPECT_EQ(report.rssAfterKB, -1);
    EXPECT_EQ(report.durationUs, std::chrono::microseconds{-1});
    EXPECT_EQ(report.minfltDelta, -1);
    EXPECT_EQ(report.majfltDelta, -1);
#endif
}
// An empty tag must not affect trimming: the call still succeeds and reports
// the same supported/trimResult contract as any other tag.
TEST(mallocTrim, empty_tag)
{
    beast::Journal journal{beast::Journal::getNullSink()};
    MallocTrimReport report = mallocTrim("", journal);
#if defined(__GLIBC__) && BOOST_OS_LINUX
    EXPECT_EQ(report.supported, true);
    EXPECT_GE(report.trimResult, 0);
#else
    EXPECT_EQ(report.supported, false);
#endif
}
// With an active debug-level sink, mallocTrim takes the instrumented path:
// duration and page-fault deltas must be populated (>= 0) on supported
// platforms instead of remaining at the -1 sentinels.
TEST(mallocTrim, with_debug_logging)
{
    // Minimal sink that reports kDebug as active but discards all output.
    struct DebugSink : public beast::Journal::Sink
    {
        DebugSink() : Sink(beast::severities::kDebug, false)
        {
        }
        void
        write(beast::severities::Severity, std::string const&) override
        {
        }
        void
        writeAlways(beast::severities::Severity, std::string const&) override
        {
        }
    };
    DebugSink sink;
    beast::Journal journal{sink};
    MallocTrimReport report = mallocTrim("debug_test", journal);
#if defined(__GLIBC__) && BOOST_OS_LINUX
    EXPECT_EQ(report.supported, true);
    EXPECT_GE(report.trimResult, 0);
    EXPECT_GE(report.durationUs.count(), 0);
    EXPECT_GE(report.minfltDelta, 0);
    EXPECT_GE(report.majfltDelta, 0);
#else
    EXPECT_EQ(report.supported, false);
    EXPECT_EQ(report.trimResult, -1);
    EXPECT_EQ(report.durationUs, std::chrono::microseconds{-1});
    EXPECT_EQ(report.minfltDelta, -1);
    EXPECT_EQ(report.majfltDelta, -1);
#endif
}
// Back-to-back invocations must be safe and keep reporting the same
// supported/trimResult contract every time.
TEST(mallocTrim, repeated_calls)
{
    beast::Journal journal{beast::Journal::getNullSink()};
    // Call malloc_trim multiple times to ensure it's safe
    for (int i = 0; i < 5; ++i)
    {
        MallocTrimReport report = mallocTrim("iteration_" + std::to_string(i), journal);
#if defined(__GLIBC__) && BOOST_OS_LINUX
        EXPECT_EQ(report.supported, true);
        EXPECT_GE(report.trimResult, 0);
#else
        EXPECT_EQ(report.supported, false);
#endif
    }
}

View File

@@ -31,6 +31,7 @@
#include <xrpld/shamap/NodeFamily.h> #include <xrpld/shamap/NodeFamily.h>
#include <xrpl/basics/ByteUtilities.h> #include <xrpl/basics/ByteUtilities.h>
#include <xrpl/basics/MallocTrim.h>
#include <xrpl/basics/ResolverAsio.h> #include <xrpl/basics/ResolverAsio.h>
#include <xrpl/basics/random.h> #include <xrpl/basics/random.h>
#include <xrpl/beast/asio/io_latency_probe.h> #include <xrpl/beast/asio/io_latency_probe.h>
@@ -1053,6 +1054,8 @@ public:
<< "; size after: " << cachedSLEs_.size(); << "; size after: " << cachedSLEs_.size();
} }
mallocTrim("doSweep", m_journal);
// Set timer to do another sweep later. // Set timer to do another sweep later.
setSweepTimer(); setSweepTimer();
} }