mirror of
https://github.com/XRPLF/rippled.git
synced 2025-11-18 18:15:50 +00:00
Compare commits
7 Commits
vlntb/mall
...
ximinez/di
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
730ac9b763 | ||
|
|
13a12c6402 | ||
|
|
362ecbd1cb | ||
|
|
39715d6915 | ||
|
|
fbeae82d61 | ||
|
|
429c15ac0d | ||
|
|
9382fe1c82 |
@@ -6,18 +6,18 @@
|
||||
"sqlite3/3.49.1#8631739a4c9b93bd3d6b753bac548a63%1756234266.869",
|
||||
"soci/4.0.3#a9f8d773cd33e356b5879a4b0564f287%1756234262.318",
|
||||
"snappy/1.1.10#968fef506ff261592ec30c574d4a7809%1756234314.246",
|
||||
"rocksdb/10.5.1#4a197eca381a3e5ae8adf8cffa5aacd0%1759820024.194",
|
||||
"rocksdb/10.5.1#4a197eca381a3e5ae8adf8cffa5aacd0%1762797952.535",
|
||||
"re2/20230301#dfd6e2bf050eb90ddd8729cfb4c844a4%1756234257.976",
|
||||
"protobuf/3.21.12#d927114e28de9f4691a6bbcdd9a529d1%1756234251.614",
|
||||
"openssl/3.5.4#a1d5835cc6ed5c5b8f3cd5b9b5d24205%1760106486.594",
|
||||
"nudb/2.0.9#c62cfd501e57055a7e0d8ee3d5e5427d%1756234237.107",
|
||||
"nudb/2.0.9#fb8dfd1a5557f5e0528114c2da17721e%1763150366.909",
|
||||
"lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504%1756234228.999",
|
||||
"libiconv/1.17#1e65319e945f2d31941a9d28cc13c058%1756223727.64",
|
||||
"libbacktrace/cci.20210118#a7691bfccd8caaf66309df196790a5a1%1756230911.03",
|
||||
"libarchive/3.8.1#5cf685686322e906cb42706ab7e099a8%1756234256.696",
|
||||
"jemalloc/5.3.0#e951da9cf599e956cebc117880d2d9f8%1729241615.244",
|
||||
"grpc/1.50.1#02291451d1e17200293a409410d1c4e1%1756234248.958",
|
||||
"doctest/2.4.12#eb9fb352fb2fdfc8abb17ec270945165%1749889324.069",
|
||||
"doctest/2.4.12#eb9fb352fb2fdfc8abb17ec270945165%1762797941.757",
|
||||
"date/3.0.4#f74bbba5a08fa388256688743136cb6f%1756234217.493",
|
||||
"c-ares/1.34.5#b78b91e7cfb1f11ce777a285bbf169c6%1756234217.915",
|
||||
"bzip2/1.0.8#00b4a4658791c1f06914e087f0e792f5%1756234261.716",
|
||||
@@ -53,6 +53,9 @@
|
||||
],
|
||||
"lz4/[>=1.9.4 <2]": [
|
||||
"lz4/1.10.0#59fc63cac7f10fbe8e05c7e62c2f3504"
|
||||
],
|
||||
"sqlite3/3.44.2": [
|
||||
"sqlite3/3.49.1"
|
||||
]
|
||||
},
|
||||
"config_requires": []
|
||||
|
||||
@@ -1,70 +0,0 @@
|
||||
#ifndef XRPL_BASICS_MALLOCTRIM_H_INCLUDED
|
||||
#define XRPL_BASICS_MALLOCTRIM_H_INCLUDED
|
||||
|
||||
#include <xrpl/beast/utility/Journal.h>
|
||||
|
||||
#include <optional>
|
||||
#include <string>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// Allocator interaction note:
|
||||
// - This facility invokes glibc's malloc_trim(0) on Linux/glibc to request that
|
||||
// ptmalloc return free heap pages to the OS.
|
||||
// - If an alternative allocator (e.g. jemalloc or tcmalloc) is linked or
|
||||
// preloaded (LD_PRELOAD), calling glibc's malloc_trim typically has no effect
|
||||
// on the *active* heap. The call is harmless but may not reclaim memory
|
||||
// because those allocators manage their own arenas.
|
||||
// - Only glibc sbrk/arena space is eligible for trimming; large mmap-backed
|
||||
// allocations are usually returned to the OS on free regardless of trimming.
|
||||
// - Call at known reclamation points (e.g., after cache sweeps / online delete)
|
||||
// and consider rate limiting to avoid churn.
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
/// Outcome of a single mallocTrim() invocation.
///
/// Sentinel values (-1) mean "not measured / not applicable"; they are
/// preserved on platforms where trimming is unsupported.
struct MallocTrimReport
{
    // True only on Linux/glibc builds where malloc_trim is available.
    bool supported{false};
    // Raw ::malloc_trim return value (1 if memory was released, 0 if not).
    int trimResult{-1};
    // Resident set size (kB) sampled before the trim; -1 if not sampled.
    long rssBeforeKB{-1};
    // Resident set size (kB) sampled after the trim; -1 if not sampled.
    long rssAfterKB{-1};

    /// RSS change across the trim, in kB (negative = memory returned to
    /// the OS). Yields 0 whenever either sample is unavailable.
    [[nodiscard]] long
    deltaKB() const noexcept
    {
        bool const sampled = rssBeforeKB >= 0 && rssAfterKB >= 0;
        return sampled ? rssAfterKB - rssBeforeKB : 0;
    }
};
|
||||
|
||||
/**
|
||||
* @brief Attempt to return freed memory to the operating system.
|
||||
*
|
||||
* On Linux with glibc malloc, this issues ::malloc_trim(0), which may release
|
||||
* free space from ptmalloc arenas back to the kernel. On other platforms, or if
|
||||
* a different allocator is in use, this function is a no-op and the report will
|
||||
* indicate that trimming is unsupported or had no effect.
|
||||
*
|
||||
* @param tag Optional identifier for logging/debugging purposes.
|
||||
* @param journal Journal for diagnostic logging.
|
||||
* @return Report containing before/after metrics and the trim result.
|
||||
*
|
||||
* @note If an alternative allocator (jemalloc/tcmalloc) is linked or preloaded,
|
||||
* calling glibc's malloc_trim may have no effect on the active heap. The
|
||||
* call is harmless but typically does not reclaim memory under those
|
||||
* allocators.
|
||||
*
|
||||
* @note Only memory served from glibc's sbrk/arena heaps is eligible for trim.
|
||||
* Large allocations satisfied via mmap are usually returned on free
|
||||
* independently of trimming.
|
||||
*
|
||||
* @note Intended for use after operations that free significant memory (e.g.,
|
||||
* cache sweeps, ledger cleanup, online delete). Consider rate limiting.
|
||||
*/
|
||||
MallocTrimReport
|
||||
mallocTrim(std::optional<std::string> const& tag, beast::Journal journal);
|
||||
|
||||
} // namespace ripple
|
||||
|
||||
#endif
|
||||
@@ -30,6 +30,7 @@ XRPL_FEATURE(Batch, Supported::yes, VoteBehavior::DefaultNo
|
||||
XRPL_FEATURE(SingleAssetVault, Supported::no, VoteBehavior::DefaultNo)
|
||||
XRPL_FIX (PayChanCancelAfter, Supported::yes, VoteBehavior::DefaultNo)
|
||||
// Check flags in Credential transactions
|
||||
XRPL_FEATURE(DefragDirectories, Supported::no, VoteBehavior::DefaultNo)
|
||||
XRPL_FIX (InvalidTxFlags, Supported::yes, VoteBehavior::DefaultNo)
|
||||
XRPL_FIX (FrozenLPTokenTransfer, Supported::yes, VoteBehavior::DefaultNo)
|
||||
XRPL_FEATURE(DeepFreeze, Supported::yes, VoteBehavior::DefaultNo)
|
||||
|
||||
@@ -1,121 +0,0 @@
|
||||
#include <xrpl/basics/Log.h>
|
||||
#include <xrpl/basics/MallocTrim.h>
|
||||
|
||||
#include <boost/predef.h>
|
||||
|
||||
#include <cstdio>
|
||||
#include <fstream>
|
||||
|
||||
#if defined(__GLIBC__) && BOOST_OS_LINUX
|
||||
#include <malloc.h>
|
||||
#include <unistd.h>
|
||||
|
||||
namespace {
|
||||
pid_t const cachedPid = ::getpid();
|
||||
} // namespace
|
||||
#endif
|
||||
|
||||
namespace ripple {
|
||||
|
||||
namespace detail {
|
||||
|
||||
#if defined(__GLIBC__) && BOOST_OS_LINUX
|
||||
|
||||
/// Extract the VmRSS value (in kB) from the text of /proc/<pid>/status.
///
/// @param status Full contents of the status file.
/// @return The resident set size in kB, or -1 if no "VmRSS:" line with a
///         parseable number is present.
long
parseVmRSSkB(std::string const& status)
{
    static constexpr char key[] = "VmRSS:";
    static constexpr auto keyLen = sizeof(key) - 1;

    std::istringstream input(status);
    std::string line;
    while (std::getline(input, line))
    {
        // Tolerate leading spaces/tabs before the field name.
        auto const start = line.find_first_not_of(" \t");
        if (start == std::string::npos)
            continue;

        // The field name must match exactly; compare() handles lines that
        // are too short to hold the whole key (mismatch => nonzero).
        if (line.compare(start, keyLen, key) != 0)
            continue;

        // Parse the number after the key; %ld itself skips any leading
        // whitespace (spaces or tabs).
        long value = -1;
        if (std::sscanf(line.c_str() + start + keyLen, "%ld", &value) != 1)
            return -1;  // key found, but no number follows
        return value;
    }

    // No VmRSS line present.
    return -1;
}
|
||||
|
||||
#endif // __GLIBC__ && BOOST_OS_LINUX
|
||||
|
||||
} // namespace detail
|
||||
|
||||
MallocTrimReport
mallocTrim(
    [[maybe_unused]] std::optional<std::string> const& tag,
    beast::Journal journal)
{
    // Ask the allocator to return free heap pages to the OS. On non-glibc
    // platforms this is a no-op and the report keeps its sentinel values.
    MallocTrimReport report;

#if !(defined(__GLIBC__) && BOOST_OS_LINUX)
    JLOG(journal.debug()) << "malloc_trim not supported on this platform";
#else
    report.supported = true;

    // Reading /proc costs a file open per sample and the numbers feed only
    // the log line, so sample RSS only when debug logging is active.
    bool const wantStats = static_cast<bool>(journal.debug());

    auto const readFile = [](std::string const& path) -> std::string {
        std::ifstream ifs(path);
        if (!ifs.is_open())
            return {};
        return std::string(
            std::istreambuf_iterator<char>(ifs),
            std::istreambuf_iterator<char>());
    };

    std::string statusPath;
    if (wantStats)
    {
        statusPath = "/proc/" + std::to_string(cachedPid) + "/status";
        report.rssBeforeKB = detail::parseVmRSSkB(readFile(statusPath));
    }

    // Single call site shared by the logging and non-logging paths (the
    // previous version duplicated the ::malloc_trim call in each branch).
    report.trimResult = ::malloc_trim(0);

    if (wantStats)
    {
        report.rssAfterKB = detail::parseVmRSSkB(readFile(statusPath));

        JLOG(journal.debug())
            << "malloc_trim tag=" << tag.value_or("default")
            << " result=" << report.trimResult
            << " rss_before=" << report.rssBeforeKB << "kB"
            << " rss_after=" << report.rssAfterKB << "kB"
            << " delta=" << report.deltaKB() << "kB";
    }
#endif

    return report;
}
|
||||
|
||||
} // namespace ripple
|
||||
@@ -8,6 +8,136 @@
|
||||
|
||||
namespace ripple {
|
||||
|
||||
namespace directory {
|
||||
|
||||
struct Gap
|
||||
{
|
||||
uint64_t const page;
|
||||
SLE::pointer node;
|
||||
uint64_t const nextPage;
|
||||
SLE::pointer next;
|
||||
};
|
||||
|
||||
std::uint64_t
|
||||
createRoot(
|
||||
ApplyView& view,
|
||||
Keylet const& directory,
|
||||
uint256 const& key,
|
||||
std::function<void(std::shared_ptr<SLE> const&)> const& describe)
|
||||
{
|
||||
auto newRoot = std::make_shared<SLE>(directory);
|
||||
newRoot->setFieldH256(sfRootIndex, directory.key);
|
||||
describe(newRoot);
|
||||
|
||||
STVector256 v;
|
||||
v.push_back(key);
|
||||
newRoot->setFieldV256(sfIndexes, v);
|
||||
|
||||
view.insert(newRoot);
|
||||
return std::uint64_t{0};
|
||||
}
|
||||
|
||||
auto
|
||||
findPreviousPage(ApplyView& view, Keylet const& directory, SLE::ref start)
|
||||
{
|
||||
std::uint64_t page = start->getFieldU64(sfIndexPrevious);
|
||||
|
||||
auto node = start;
|
||||
|
||||
if (page)
|
||||
{
|
||||
node = view.peek(keylet::page(directory, page));
|
||||
if (!node)
|
||||
LogicError("Directory chain: root back-pointer broken.");
|
||||
}
|
||||
|
||||
auto indexes = node->getFieldV256(sfIndexes);
|
||||
return std::make_tuple(page, node, indexes);
|
||||
}
|
||||
|
||||
std::uint64_t
|
||||
insertKey(
|
||||
ApplyView& view,
|
||||
SLE::ref node,
|
||||
std::uint64_t page,
|
||||
bool preserveOrder,
|
||||
STVector256& indexes,
|
||||
uint256 const& key)
|
||||
{
|
||||
if (preserveOrder)
|
||||
{
|
||||
if (std::find(indexes.begin(), indexes.end(), key) != indexes.end())
|
||||
LogicError("dirInsert: double insertion");
|
||||
|
||||
indexes.push_back(key);
|
||||
}
|
||||
else
|
||||
{
|
||||
// We can't be sure if this page is already sorted because
|
||||
// it may be a legacy page we haven't yet touched. Take
|
||||
// the time to sort it.
|
||||
std::sort(indexes.begin(), indexes.end());
|
||||
|
||||
auto pos = std::lower_bound(indexes.begin(), indexes.end(), key);
|
||||
|
||||
if (pos != indexes.end() && key == *pos)
|
||||
LogicError("dirInsert: double insertion");
|
||||
|
||||
indexes.insert(pos, key);
|
||||
}
|
||||
|
||||
node->setFieldV256(sfIndexes, indexes);
|
||||
view.update(node);
|
||||
return page;
|
||||
}
|
||||
|
||||
std::optional<std::uint64_t>
|
||||
insertPage(
|
||||
ApplyView& view,
|
||||
std::uint64_t page,
|
||||
SLE::pointer node,
|
||||
std::uint64_t nextPage,
|
||||
SLE::ref next,
|
||||
uint256 const& key,
|
||||
Keylet const& directory,
|
||||
std::function<void(std::shared_ptr<SLE> const&)> const& describe)
|
||||
{
|
||||
// Check whether we're out of pages.
|
||||
if (++page >= dirNodeMaxPages)
|
||||
{
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
// We are about to create a new node; we'll link it to
|
||||
// the chain first:
|
||||
node->setFieldU64(sfIndexNext, page);
|
||||
view.update(node);
|
||||
|
||||
next->setFieldU64(sfIndexPrevious, page);
|
||||
view.update(next);
|
||||
|
||||
// Insert the new key:
|
||||
STVector256 indexes;
|
||||
indexes.push_back(key);
|
||||
|
||||
node = std::make_shared<SLE>(keylet::page(directory, page));
|
||||
node->setFieldH256(sfRootIndex, directory.key);
|
||||
node->setFieldV256(sfIndexes, indexes);
|
||||
|
||||
// Save some space by not specifying the value 0 since
|
||||
// it's the default.
|
||||
if (page != 1)
|
||||
node->setFieldU64(sfIndexPrevious, page - 1);
|
||||
if (nextPage)
|
||||
node->setFieldU64(sfIndexNext, nextPage);
|
||||
describe(node);
|
||||
view.insert(node);
|
||||
|
||||
return page;
|
||||
}
|
||||
|
||||
} // namespace directory
|
||||
|
||||
std::optional<std::uint64_t>
|
||||
ApplyView::dirAdd(
|
||||
bool preserveOrder,
|
||||
@@ -15,66 +145,15 @@ ApplyView::dirAdd(
|
||||
uint256 const& key,
|
||||
std::function<void(std::shared_ptr<SLE> const&)> const& describe)
|
||||
{
|
||||
auto root = peek(directory);
|
||||
auto const root = peek(directory);
|
||||
|
||||
if (!root)
|
||||
{
|
||||
// No root, make it.
|
||||
root = std::make_shared<SLE>(directory);
|
||||
root->setFieldH256(sfRootIndex, directory.key);
|
||||
describe(root);
|
||||
|
||||
STVector256 v;
|
||||
v.push_back(key);
|
||||
root->setFieldV256(sfIndexes, v);
|
||||
|
||||
insert(root);
|
||||
return std::uint64_t{0};
|
||||
}
|
||||
|
||||
std::uint64_t page = root->getFieldU64(sfIndexPrevious);
|
||||
|
||||
auto node = root;
|
||||
|
||||
if (page)
|
||||
{
|
||||
node = peek(keylet::page(directory, page));
|
||||
if (!node)
|
||||
LogicError("Directory chain: root back-pointer broken.");
|
||||
}
|
||||
|
||||
auto indexes = node->getFieldV256(sfIndexes);
|
||||
|
||||
// If there's space, we use it:
|
||||
if (indexes.size() < dirNodeMaxEntries)
|
||||
{
|
||||
if (preserveOrder)
|
||||
{
|
||||
if (std::find(indexes.begin(), indexes.end(), key) != indexes.end())
|
||||
LogicError("dirInsert: double insertion");
|
||||
|
||||
indexes.push_back(key);
|
||||
}
|
||||
else
|
||||
{
|
||||
// We can't be sure if this page is already sorted because
|
||||
// it may be a legacy page we haven't yet touched. Take
|
||||
// the time to sort it.
|
||||
std::sort(indexes.begin(), indexes.end());
|
||||
|
||||
auto pos = std::lower_bound(indexes.begin(), indexes.end(), key);
|
||||
|
||||
if (pos != indexes.end() && key == *pos)
|
||||
LogicError("dirInsert: double insertion");
|
||||
|
||||
indexes.insert(pos, key);
|
||||
}
|
||||
|
||||
node->setFieldV256(sfIndexes, indexes);
|
||||
update(node);
|
||||
return page;
|
||||
return directory::createRoot(*this, directory, key, describe);
|
||||
}
|
||||
|
||||
//--Conflict start
|
||||
// We rely on modulo arithmetic of unsigned integers (guaranteed in
|
||||
// [basic.fundamental] paragraph 2) to detect page representation overflow.
|
||||
// For signed integers this would be UB, hence static_assert here.
|
||||
@@ -92,30 +171,61 @@ ApplyView::dirAdd(
|
||||
page >= dirNodeMaxPages) // Old pages limit
|
||||
return std::nullopt;
|
||||
|
||||
// We are about to create a new node; we'll link it to
|
||||
// the chain first:
|
||||
node->setFieldU64(sfIndexNext, page);
|
||||
update(node);
|
||||
// The block above and below this comment conflicted, and I don't
|
||||
// fee like resolving it right now, so I'm saving it for later.
|
||||
// Because page is defined twice, it won't build.
|
||||
|
||||
root->setFieldU64(sfIndexPrevious, page);
|
||||
update(root);
|
||||
auto [page, node, indexes] =
|
||||
directory::findPreviousPage(*this, directory, root);
|
||||
//--Conflict end
|
||||
|
||||
// Insert the new key:
|
||||
indexes.clear();
|
||||
indexes.push_back(key);
|
||||
if (rules().enabled(featureDefragDirectories))
|
||||
{
|
||||
// If there are more nodes than just the root, and there's no space in
|
||||
// the last one, walk backwards to find one with space, or to find one
|
||||
// missing.
|
||||
std::optional<directory::Gap> gapPages;
|
||||
while (page && indexes.size() >= dirNodeMaxEntries)
|
||||
{
|
||||
// Find a page with space, or a gap in pages.
|
||||
auto [prevPage, prevNode, prevIndexes] =
|
||||
directory::findPreviousPage(*this, directory, node);
|
||||
if (!gapPages && prevPage != page - 1)
|
||||
gapPages.emplace(prevPage, prevNode, page, node);
|
||||
page = prevPage;
|
||||
node = prevNode;
|
||||
indexes = prevIndexes;
|
||||
}
|
||||
// We looped through all the pages back to the root.
|
||||
if (!page)
|
||||
{
|
||||
// If we found a gap, use it.
|
||||
if (gapPages)
|
||||
{
|
||||
return directory::insertPage(
|
||||
*this,
|
||||
gapPages->page,
|
||||
gapPages->node,
|
||||
gapPages->nextPage,
|
||||
gapPages->next,
|
||||
key,
|
||||
directory,
|
||||
describe);
|
||||
}
|
||||
std::tie(page, node, indexes) =
|
||||
directory::findPreviousPage(*this, directory, root);
|
||||
}
|
||||
}
|
||||
|
||||
node = std::make_shared<SLE>(keylet::page(directory, page));
|
||||
node->setFieldH256(sfRootIndex, directory.key);
|
||||
node->setFieldV256(sfIndexes, indexes);
|
||||
// If there's space, we use it:
|
||||
if (indexes.size() < dirNodeMaxEntries)
|
||||
{
|
||||
return directory::insertKey(
|
||||
*this, node, page, preserveOrder, indexes, key);
|
||||
}
|
||||
|
||||
// Save some space by not specifying the value 0 since
|
||||
// it's the default.
|
||||
if (page != 1)
|
||||
node->setFieldU64(sfIndexPrevious, page - 1);
|
||||
describe(node);
|
||||
insert(node);
|
||||
|
||||
return page;
|
||||
return directory::insertPage(
|
||||
*this, page, node, 0, root, key, directory, describe);
|
||||
}
|
||||
|
||||
bool
|
||||
|
||||
@@ -1329,7 +1329,7 @@ class Vault_test : public beast::unit_test::suite
|
||||
Vault& vault) {
|
||||
auto [tx, keylet] = vault.create({.owner = owner, .asset = asset});
|
||||
testcase("insufficient fee");
|
||||
env(tx, fee(env.current()->fees().base), ter(telINSUF_FEE_P));
|
||||
env(tx, fee(env.current()->fees().base - 1), ter(telINSUF_FEE_P));
|
||||
});
|
||||
|
||||
testCase([this](
|
||||
@@ -2074,6 +2074,10 @@ class Vault_test : public beast::unit_test::suite
|
||||
auto const sleMPT = env.le(mptoken);
|
||||
BEAST_EXPECT(sleMPT == nullptr);
|
||||
|
||||
// Use one reserve so the next transaction fails
|
||||
env(ticket::create(owner, 1));
|
||||
env.close();
|
||||
|
||||
// No reserve to create MPToken for asset in VaultWithdraw
|
||||
tx = vault.withdraw(
|
||||
{.depositor = owner,
|
||||
@@ -2091,7 +2095,7 @@ class Vault_test : public beast::unit_test::suite
|
||||
}
|
||||
},
|
||||
{.requireAuth = false,
|
||||
.initialXRP = acctReserve + incReserve * 4 - 1});
|
||||
.initialXRP = acctReserve + incReserve * 4 + 1});
|
||||
|
||||
testCase([this](
|
||||
Env& env,
|
||||
@@ -2980,6 +2984,9 @@ class Vault_test : public beast::unit_test::suite
|
||||
env.le(keylet::line(owner, asset.raw().get<Issue>()));
|
||||
BEAST_EXPECT(trustline == nullptr);
|
||||
|
||||
env(ticket::create(owner, 1));
|
||||
env.close();
|
||||
|
||||
// Fail because not enough reserve to create trust line
|
||||
tx = vault.withdraw(
|
||||
{.depositor = owner,
|
||||
@@ -2995,7 +3002,7 @@ class Vault_test : public beast::unit_test::suite
|
||||
env(tx);
|
||||
env.close();
|
||||
},
|
||||
CaseArgs{.initialXRP = acctReserve + incReserve * 4 - 1});
|
||||
CaseArgs{.initialXRP = acctReserve + incReserve * 4 + 1});
|
||||
|
||||
testCase(
|
||||
[&, this](
|
||||
@@ -3016,8 +3023,7 @@ class Vault_test : public beast::unit_test::suite
|
||||
env(pay(owner, charlie, asset(100)));
|
||||
env.close();
|
||||
|
||||
// Use up some reserve on tickets
|
||||
env(ticket::create(charlie, 2));
|
||||
env(ticket::create(charlie, 3));
|
||||
env.close();
|
||||
|
||||
// Fail because not enough reserve to create MPToken for shares
|
||||
@@ -3035,7 +3041,7 @@ class Vault_test : public beast::unit_test::suite
|
||||
env(tx);
|
||||
env.close();
|
||||
},
|
||||
CaseArgs{.initialXRP = acctReserve + incReserve * 4 - 1});
|
||||
CaseArgs{.initialXRP = acctReserve + incReserve * 4 + 1});
|
||||
|
||||
testCase([&, this](
|
||||
Env& env,
|
||||
|
||||
@@ -19,7 +19,6 @@ Vault::create(CreateArgs const& args)
|
||||
jv[jss::TransactionType] = jss::VaultCreate;
|
||||
jv[jss::Account] = args.owner.human();
|
||||
jv[jss::Asset] = to_json(args.asset);
|
||||
jv[jss::Fee] = STAmount(env.current()->fees().increment).getJson();
|
||||
if (args.flags)
|
||||
jv[jss::Flags] = *args.flags;
|
||||
return {jv, keylet};
|
||||
|
||||
@@ -1,207 +0,0 @@
|
||||
#include <xrpl/basics/MallocTrim.h>
|
||||
|
||||
#include <boost/predef.h>
|
||||
|
||||
#include <doctest/doctest.h>
|
||||
|
||||
using namespace ripple;
|
||||
|
||||
#if defined(__GLIBC__) && BOOST_OS_LINUX
|
||||
namespace ripple::detail {
|
||||
long
|
||||
parseVmRSSkB(std::string const& status);
|
||||
} // namespace ripple::detail
|
||||
#endif
|
||||
|
||||
TEST_CASE("MallocTrimReport structure")
{
    MallocTrimReport r;

    // A freshly constructed report carries the "unknown" sentinels and
    // reports a zero delta.
    CHECK(r.supported == false);
    CHECK(r.trimResult == -1);
    CHECK(r.rssBeforeKB == -1);
    CHECK(r.rssAfterKB == -1);
    CHECK(r.deltaKB() == 0);

    // Memory released: after < before gives a negative delta.
    r.rssBeforeKB = 1000;
    r.rssAfterKB = 800;
    CHECK(r.deltaKB() == -200);

    // Memory grew: after > before gives a positive delta.
    r.rssBeforeKB = 500;
    r.rssAfterKB = 600;
    CHECK(r.deltaKB() == 100);

    // Unchanged RSS gives zero.
    r.rssBeforeKB = 1234;
    r.rssAfterKB = 1234;
    CHECK(r.deltaKB() == 0);
}
|
||||
|
||||
#if defined(__GLIBC__) && BOOST_OS_LINUX
|
||||
TEST_CASE("parseVmRSSkB")
{
    using ripple::detail::parseVmRSSkB;

    struct Sample
    {
        char const* status;
        long expected;
    };

    // Inputs from which a VmRSS value should be extracted.
    Sample const found[] = {
        // Standard format.
        {"VmRSS: 123456 kB\n", 123456},
        // Buried among other /proc status fields.
        {"Name: rippled\n"
         "VmPeak: 1234567 kB\n"
         "VmSize: 1234567 kB\n"
         "VmRSS: 987654 kB\n"
         "VmData: 123456 kB\n",
         987654},
        // Minimal whitespace, no trailing newline.
        {"VmRSS: 42 kB", 42},
        // Extra whitespace before the value.
        {"VmRSS: 999999 kB", 999999},
        // Tabs between the key and the value.
        {"VmRSS:\t\t12345 kB", 12345},
        // Zero is a legitimate value.
        {"VmRSS: 0 kB\n", 0},
    };
    for (auto const& [status, expected] : found)
        CHECK(parseVmRSSkB(status) == expected);

    // Inputs where no value can be parsed; all must report -1.
    char const* const notFound[] = {
        // Status text without a VmRSS line.
        "Name: rippled\n"
        "VmPeak: 1234567 kB\n"
        "VmSize: 1234567 kB\n",
        // Empty input.
        "",
        // Key present but no number follows.
        "VmRSS: \n",
        // Key present but the value is not numeric.
        "VmRSS: abc kB\n",
        // A longer key must not match ("NotVmRSS:").
        "NotVmRSS: 123456 kB\n",
    };
    for (auto const* status : notFound)
        CHECK(parseVmRSSkB(status) == -1);
}
|
||||
#endif
|
||||
|
||||
TEST_CASE("mallocTrim basic functionality")
{
    beast::Journal journal{beast::Journal::getNullSink()};

    // Untagged invocation.
    {
        auto const r = mallocTrim(std::nullopt, journal);

#if defined(__GLIBC__) && BOOST_OS_LINUX
        // Supported on Linux/glibc; ::malloc_trim returns 0 or 1.
        CHECK(r.supported == true);
        CHECK(r.trimResult >= 0);
#else
        // Elsewhere the call is a no-op and the sentinels survive.
        CHECK(r.supported == false);
        CHECK(r.trimResult == -1);
        CHECK(r.rssBeforeKB == -1);
        CHECK(r.rssAfterKB == -1);
#endif
    }

    // Tagged invocation.
    {
        auto const r =
            mallocTrim(std::optional<std::string>("test_tag"), journal);

#if defined(__GLIBC__) && BOOST_OS_LINUX
        CHECK(r.supported == true);
        CHECK(r.trimResult >= 0);
#else
        CHECK(r.supported == false);
#endif
    }
}
|
||||
|
||||
TEST_CASE("mallocTrim with debug logging")
{
    beast::Journal journal{beast::Journal::getNullSink()};

    auto const r =
        mallocTrim(std::optional<std::string>("debug_test"), journal);

#if defined(__GLIBC__) && BOOST_OS_LINUX
    // Must run to completion (no crash) and report platform support.
    CHECK(r.supported == true);
#else
    CHECK(r.supported == false);
#endif
}
|
||||
|
||||
TEST_CASE("mallocTrim repeated calls")
{
    beast::Journal journal{beast::Journal::getNullSink()};

    // Trimming must be safe to invoke repeatedly in quick succession.
    int iteration = 0;
    while (iteration < 5)
    {
        auto const r = mallocTrim(
            std::optional<std::string>(
                "iteration_" + std::to_string(iteration)),
            journal);

#if defined(__GLIBC__) && BOOST_OS_LINUX
        CHECK(r.supported == true);
        CHECK(r.trimResult >= 0);
#else
        CHECK(r.supported == false);
#endif

        ++iteration;
    }
}
|
||||
@@ -7,7 +7,6 @@
|
||||
#include <xrpld/core/JobQueue.h>
|
||||
|
||||
#include <xrpl/basics/Log.h>
|
||||
#include <xrpl/basics/MallocTrim.h>
|
||||
#include <xrpl/protocol/Indexes.h>
|
||||
|
||||
namespace ripple {
|
||||
@@ -155,8 +154,6 @@ OrderBookDB::update(std::shared_ptr<ReadView const> const& ledger)
|
||||
}
|
||||
|
||||
app_.getLedgerMaster().newOrderBookDB();
|
||||
|
||||
mallocTrim(std::optional<std::string>("OrderBookUpdate"), j_);
|
||||
}
|
||||
|
||||
void
|
||||
|
||||
@@ -37,7 +37,6 @@
|
||||
#include <xrpld/shamap/NodeFamily.h>
|
||||
|
||||
#include <xrpl/basics/ByteUtilities.h>
|
||||
#include <xrpl/basics/MallocTrim.h>
|
||||
#include <xrpl/basics/ResolverAsio.h>
|
||||
#include <xrpl/basics/random.h>
|
||||
#include <xrpl/beast/asio/io_latency_probe.h>
|
||||
@@ -1107,8 +1106,6 @@ public:
|
||||
<< "; size after: " << cachedSLEs_.size();
|
||||
}
|
||||
|
||||
mallocTrim(std::optional<std::string>("doSweep"), m_journal);
|
||||
|
||||
// Set timer to do another sweep later.
|
||||
setSweepTimer();
|
||||
}
|
||||
|
||||
@@ -34,7 +34,6 @@
|
||||
#include <xrpld/rpc/MPTokenIssuanceID.h>
|
||||
#include <xrpld/rpc/ServerHandler.h>
|
||||
|
||||
#include <xrpl/basics/MallocTrim.h>
|
||||
#include <xrpl/basics/UptimeClock.h>
|
||||
#include <xrpl/basics/mulDiv.h>
|
||||
#include <xrpl/basics/safe_cast.h>
|
||||
@@ -2548,14 +2547,10 @@ NetworkOPsImp::setMode(OperatingMode om)
|
||||
if (mMode == om)
|
||||
return;
|
||||
|
||||
auto const oldMode = mMode.load(std::memory_order_relaxed);
|
||||
mMode = om;
|
||||
|
||||
accounting_.mode(om);
|
||||
|
||||
if (oldMode != OperatingMode::FULL && om == OperatingMode::FULL)
|
||||
mallocTrim(std::optional<std::string>("SyncComplete"), m_journal);
|
||||
|
||||
JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
|
||||
pubServer();
|
||||
}
|
||||
|
||||
@@ -5,7 +5,6 @@
|
||||
#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
|
||||
#include <xrpld/core/ConfigSections.h>
|
||||
|
||||
#include <xrpl/basics/MallocTrim.h>
|
||||
#include <xrpl/beast/core/CurrentThreadName.h>
|
||||
#include <xrpl/nodestore/Scheduler.h>
|
||||
#include <xrpl/nodestore/detail/DatabaseRotatingImp.h>
|
||||
@@ -546,8 +545,6 @@ SHAMapStoreImp::clearCaches(LedgerIndex validatedSeq)
|
||||
{
|
||||
ledgerMaster_->clearLedgerCachePrior(validatedSeq);
|
||||
fullBelowCache_->clear();
|
||||
|
||||
mallocTrim(std::optional<std::string>("clearCaches"), journal_);
|
||||
}
|
||||
|
||||
void
|
||||
@@ -613,8 +610,6 @@ SHAMapStoreImp::clearPrior(LedgerIndex lastRotated)
|
||||
});
|
||||
if (healthWait() == stopping)
|
||||
return;
|
||||
|
||||
mallocTrim(std::optional<std::string>("clearPrior"), journal_);
|
||||
}
|
||||
|
||||
SHAMapStoreImp::HealthResult
|
||||
|
||||
@@ -79,13 +79,6 @@ VaultCreate::preflight(PreflightContext const& ctx)
|
||||
return tesSUCCESS;
|
||||
}
|
||||
|
||||
XRPAmount
VaultCreate::calculateBaseFee(ReadView const& view, STTx const& tx)
{
    // Charge the owner-reserve-based fee for vault creation; one reserve
    // increment is typically much greater than one base fee.
    auto const fee = calculateOwnerReserveFee(view, tx);
    return fee;
}
|
||||
|
||||
TER
|
||||
VaultCreate::preclaim(PreclaimContext const& ctx)
|
||||
{
|
||||
@@ -142,8 +135,9 @@ VaultCreate::doApply()
|
||||
|
||||
if (auto ter = dirLink(view(), account_, vault))
|
||||
return ter;
|
||||
adjustOwnerCount(view(), owner, 1, j_);
|
||||
auto ownerCount = owner->at(sfOwnerCount);
|
||||
// We will create Vault and PseudoAccount, hence increase OwnerCount by 2
|
||||
adjustOwnerCount(view(), owner, 2, j_);
|
||||
auto const ownerCount = owner->at(sfOwnerCount);
|
||||
if (mPriorBalance < view().fees().accountReserve(ownerCount))
|
||||
return tecINSUFFICIENT_RESERVE;
|
||||
|
||||
|
||||
@@ -23,9 +23,6 @@ public:
|
||||
static NotTEC
|
||||
preflight(PreflightContext const& ctx);
|
||||
|
||||
static XRPAmount
|
||||
calculateBaseFee(ReadView const& view, STTx const& tx);
|
||||
|
||||
static TER
|
||||
preclaim(PreclaimContext const& ctx);
|
||||
|
||||
|
||||
@@ -146,7 +146,35 @@ VaultDelete::doApply()
|
||||
return tecHAS_OBLIGATIONS; // LCOV_EXCL_LINE
|
||||
|
||||
// Destroy the pseudo-account.
|
||||
view().erase(view().peek(keylet::account(pseudoID)));
|
||||
auto vaultPseudoSLE = view().peek(keylet::account(pseudoID));
|
||||
if (!vaultPseudoSLE || vaultPseudoSLE->at(~sfVaultID) != vault->key())
|
||||
return tefBAD_LEDGER; // LCOV_EXCL_LINE
|
||||
|
||||
// Making the payment and removing the empty holding should have deleted any
|
||||
// obligations associated with the vault or vault pseudo-account.
|
||||
if (*vaultPseudoSLE->at(sfBalance))
|
||||
{
|
||||
// LCOV_EXCL_START
|
||||
JLOG(j_.error()) << "VaultDelete: pseudo-account has a balance";
|
||||
return tecHAS_OBLIGATIONS;
|
||||
// LCOV_EXCL_STOP
|
||||
}
|
||||
if (vaultPseudoSLE->at(sfOwnerCount) != 0)
|
||||
{
|
||||
// LCOV_EXCL_START
|
||||
JLOG(j_.error()) << "VaultDelete: pseudo-account still owns objects";
|
||||
return tecHAS_OBLIGATIONS;
|
||||
// LCOV_EXCL_STOP
|
||||
}
|
||||
if (view().exists(keylet::ownerDir(pseudoID)))
|
||||
{
|
||||
// LCOV_EXCL_START
|
||||
JLOG(j_.error()) << "VaultDelete: pseudo-account has a directory";
|
||||
return tecHAS_OBLIGATIONS;
|
||||
// LCOV_EXCL_STOP
|
||||
}
|
||||
|
||||
view().erase(vaultPseudoSLE);
|
||||
|
||||
// Remove the vault from its owner's directory.
|
||||
auto const ownerID = vault->at(sfOwner);
|
||||
@@ -170,7 +198,9 @@ VaultDelete::doApply()
|
||||
return tefBAD_LEDGER;
|
||||
// LCOV_EXCL_STOP
|
||||
}
|
||||
adjustOwnerCount(view(), owner, -1, j_);
|
||||
|
||||
// We are destroying Vault and PseudoAccount, hence decrease by 2
|
||||
adjustOwnerCount(view(), owner, -2, j_);
|
||||
|
||||
// Destroy the vault.
|
||||
view().erase(vault);
|
||||
|
||||
Reference in New Issue
Block a user