Compare commits

..

9 Commits

Author SHA1 Message Date
RichardAH
48ed253ddb Merge branch 'dev' into flap_fix 2025-10-18 16:26:49 +10:00
Richard Holland
49138aa0ab clang 2025-10-18 11:56:51 +11:00
Richard Holland
2dfe1fbe89 ensure fallthrough doesn't execute wrong codepath 2025-10-16 19:35:00 +11:00
Richard Holland
945ad4869c testcase 2025-10-16 17:31:31 +11:00
Richard Holland
d5ff8b7010 bug 2025-10-16 17:10:35 +11:00
RichardAH
596b080a6b Merge branch 'dev' into flap_fix 2025-10-16 15:54:59 +10:00
RichardAH
c101aa0920 Merge branch 'dev' into flap_fix 2025-10-15 12:17:18 +10:00
RichardAH
83e231941a Merge branch 'dev' into flap_fix 2025-10-12 15:10:14 +10:00
Richard Holland
8c955da7cf import_vl_keys logic fix untested compiling 2025-10-11 12:51:04 +11:00
18 changed files with 66 additions and 649 deletions

View File

@@ -965,7 +965,6 @@ if (tests)
src/test/nodestore/Basics_test.cpp
src/test/nodestore/DatabaseShard_test.cpp
src/test/nodestore/Database_test.cpp
src/test/nodestore/NuDBFactory_test.cpp
src/test/nodestore/Timing_test.cpp
src/test/nodestore/import_test.cpp
src/test/nodestore/varint_test.cpp

View File

@@ -176,11 +176,10 @@ existing maintainer without a vote.
## Current Maintainers
* [Richard Holland](https://github.com/RichardAH) (XRPL Labs + INFTF)
* [Denis Angell](https://github.com/dangell7) (XRPL Labs + INFTF)
* [Wietse Wind](https://github.com/WietseWind) (XRPL Labs + INFTF)
* [tequ](https://github.com/tequdev) (Independent + INFTF)
* [Richard Holland](https://github.com/RichardAH) (XRPL Labs + XRP Ledger Foundation)
* [Denis Angell](https://github.com/dangell7) (XRPL Labs + XRP Ledger Foundation)
* [Wietse Wind](https://github.com/WietseWind) (XRPL Labs + XRP Ledger Foundation)
[1]: https://docs.github.com/en/get-started/quickstart/contributing-to-projects
[2]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/incorporating-changes-from-a-pull-request/about-pull-request-merges#squash-and-merge-your-commits
[2]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/incorporating-changes-from-a-pull-request/about-pull-request-merges#squash-and-merge-your-commits

View File

@@ -1127,39 +1127,6 @@
# it must be defined with the same value in both
# sections.
#
# Optional keys for NuDB only:
#
# nudb_block_size EXPERIMENTAL: Block size in bytes for NuDB storage.
# Must be a power of 2 between 4096 and 32768. Default is 4096.
#
# This parameter controls the fundamental storage unit
# size for NuDB's internal data structures. The choice
# of block size can significantly impact performance
# depending on your storage hardware and filesystem:
#
# - 4096 bytes: Optimal for most standard SSDs and
# traditional filesystems (ext4, NTFS, HFS+).
# Provides good balance of performance and storage
# efficiency. Recommended for most deployments.
#
# - 8192-16384 bytes: May improve performance on
# high-end NVMe SSDs and copy-on-write filesystems
# like ZFS or Btrfs that benefit from larger block
# alignment. Can reduce metadata overhead for large
# databases.
#
# - 32768 bytes (32K): Maximum supported block size
# for high-performance scenarios with very fast
# storage. May increase memory usage and reduce
# efficiency for smaller databases.
#
# Note: This setting cannot be changed after database
# creation without rebuilding the entire database.
# Choose carefully based on your hardware and expected
# database size.
#
# Example: nudb_block_size=4096
#
# These keys modify the behavior of online_delete, and thus are only
# relevant if online_delete is defined and non-zero:

View File

@@ -62,9 +62,6 @@
#define sfEmitGeneration ((2U << 16U) + 46U)
#define sfLockCount ((2U << 16U) + 49U)
#define sfFirstNFTokenSequence ((2U << 16U) + 50U)
#define sfStartTime ((2U << 16U) + 93U)
#define sfRepeatCount ((2U << 16U) + 94U)
#define sfDelaySeconds ((2U << 16U) + 95U)
#define sfXahauActivationLgrSeq ((2U << 16U) + 96U)
#define sfImportSequence ((2U << 16U) + 97U)
#define sfRewardTime ((2U << 16U) + 98U)
@@ -132,7 +129,6 @@
#define sfGovernanceFlags ((5U << 16U) + 99U)
#define sfGovernanceMarks ((5U << 16U) + 98U)
#define sfEmittedTxnID ((5U << 16U) + 97U)
#define sfCron ((5U << 16U) + 95U)
#define sfAmount ((6U << 16U) + 1U)
#define sfBalance ((6U << 16U) + 2U)
#define sfLimitAmount ((6U << 16U) + 3U)
@@ -240,4 +236,4 @@
#define sfActiveValidators ((15U << 16U) + 95U)
#define sfImportVLKeys ((15U << 16U) + 94U)
#define sfHookEmissions ((15U << 16U) + 93U)
#define sfAmounts ((15U << 16U) + 92U)
#define sfAmounts ((15U << 16U) + 92U)

View File

@@ -31,8 +31,6 @@
#define ttURITOKEN_BUY 47
#define ttURITOKEN_CREATE_SELL_OFFER 48
#define ttURITOKEN_CANCEL_SELL_OFFER 49
#define ttCRON 92
#define ttCRON_SET 93
#define ttREMIT 95
#define ttGENESIS_MINT 96
#define ttIMPORT 97
@@ -42,4 +40,4 @@
#define ttFEE 101
#define ttUNL_MODIFY 102
#define ttEMIT_FAILURE 103
#define ttUNL_REPORT 104
#define ttUNL_REPORT 104

View File

@@ -121,11 +121,11 @@ Cron::doApply()
uint32_t delay = sleCron->getFieldU32(sfDelaySeconds);
uint32_t recur = sleCron->getFieldU32(sfRepeatCount);
uint32_t lastStartTime = sleCron->getFieldU32(sfStartTime);
uint32_t currentTime = view.parentCloseTime().time_since_epoch().count();
// do all this sanity checking before we modify the ledger...
uint32_t afterTime = lastStartTime + delay;
if (afterTime < lastStartTime)
uint32_t afterTime = currentTime + delay;
if (afterTime < currentTime)
return tefINTERNAL;
// in all circumstances the Cron object is deleted...
@@ -163,7 +163,6 @@ Cron::doApply()
sleCron->setFieldU64(sfOwnerNode, *page);
sleCron->setFieldU32(sfDelaySeconds, delay);
sleCron->setFieldU32(sfRepeatCount, recur - 1);
sleCron->setFieldU32(sfStartTime, afterTime);
sleCron->setAccountID(sfOwner, id);
sle->setFieldH256(sfCron, klCron.key);

View File

@@ -51,74 +51,47 @@ SetCron::preflight(PreflightContext const& ctx)
return temINVALID_FLAG;
}
// DelaySeconds (D), RepeatCount (R), StartTime (S)
// DRS - Set Cron with Delay and Repeat and StartTime
// DR- - Invalid(StartTime is required)
// D-S - Invalid (both DelaySeconds and RepeatCount are required)
// -RS - Invalid (both DelaySeconds and RepeatCount are required)
// --S - Onetime cron with StartTime only
// DelaySeconds (D), RepeatCount (R)
// DR - Set Cron with Delay and Repeat
// D- - Set Cron (once off) with Delay only (repeat implicitly 0)
// -R - Invalid
// -- - Clear any existing cron (succeeds even if there isn't one) / with
// tfCronUnset flag set
bool const hasDelay = tx.isFieldPresent(sfDelaySeconds);
bool const hasRepeat = tx.isFieldPresent(sfRepeatCount);
bool const hasStartTime = tx.isFieldPresent(sfStartTime);
if (tx.isFlag(tfCronUnset))
{
// delete operation
if (hasDelay || hasRepeat || hasStartTime)
if (hasDelay || hasRepeat)
{
JLOG(j.debug()) << "SetCron: tfCronUnset flag cannot be used with "
"DelaySeconds, RepeatCount or StartTime.";
"DelaySeconds or RepeatCount.";
return temMALFORMED;
}
}
else
{
// create operation
if (!hasStartTime)
if (!hasDelay)
{
JLOG(j.debug())
<< "SetCron: StartTime is required. Use StartTime=0 for "
"immediate execution, or specify a future timestamp.";
return temMALFORMED;
}
if ((!hasDelay && hasRepeat) || (hasDelay && !hasRepeat))
{
JLOG(j.debug())
<< "SetCron: DelaySeconds and RepeatCount must both be present "
"for recurring crons, or both absent for one-off crons.";
JLOG(j.debug()) << "SetCron: DelaySeconds must be "
"specified to create a cron.";
return temMALFORMED;
}
// check delay is not too high
if (hasDelay)
auto delay = tx.getFieldU32(sfDelaySeconds);
if (delay > 31536000UL /* 365 days in seconds */)
{
auto delay = tx.getFieldU32(sfDelaySeconds);
if (delay > 31536000UL /* 365 days in seconds */)
{
JLOG(j.debug())
<< "SetCron: DelaySeconds was too high. (max 365 "
"days in seconds).";
return temMALFORMED;
}
JLOG(j.debug()) << "SetCron: DelaySeconds was too high. (max 365 "
"days in seconds).";
return temMALFORMED;
}
// check repeat is not too high
if (hasRepeat)
{
auto recur = tx.getFieldU32(sfRepeatCount);
if (recur == 0)
{
JLOG(j.debug())
<< "SetCron: RepeatCount must be greater than 0."
"For one-time execution, omit DelaySeconds and "
"RepeatCount.";
return temMALFORMED;
}
if (recur > 256)
{
JLOG(j.debug())
@@ -135,30 +108,6 @@ SetCron::preflight(PreflightContext const& ctx)
TER
SetCron::preclaim(PreclaimContext const& ctx)
{
if (ctx.tx.isFieldPresent(sfStartTime) &&
ctx.tx.getFieldU32(sfStartTime) != 0)
{
// StartTime 0 means the cron will execute immediately
auto const startTime = ctx.tx.getFieldU32(sfStartTime);
auto const parentCloseTime =
ctx.view.parentCloseTime().time_since_epoch().count();
if (startTime < parentCloseTime)
{
JLOG(ctx.j.debug()) << "SetCron: StartTime must be in the future "
"(or 0 for immediate execution)";
return tecEXPIRED;
}
if (startTime > ctx.view.parentCloseTime().time_since_epoch().count() +
365 * 24 * 60 * 60)
{
JLOG(ctx.j.debug()) << "SetCron: StartTime is too far in the "
"future (max 365 days).";
return tecEXPIRED;
}
}
return tesSUCCESS;
}
@@ -174,7 +123,6 @@ SetCron::doApply()
// ledger.
uint32_t delay{0};
uint32_t recur{0};
uint32_t startTime{0};
if (!isDelete)
{
@@ -182,14 +130,17 @@ SetCron::doApply()
delay = tx.getFieldU32(sfDelaySeconds);
if (tx.isFieldPresent(sfRepeatCount))
recur = tx.getFieldU32(sfRepeatCount);
if (tx.isFieldPresent(sfStartTime))
{
startTime = tx.getFieldU32(sfStartTime);
if (startTime == 0)
startTime = view.parentCloseTime().time_since_epoch().count();
}
}
uint32_t currentTime = view.parentCloseTime().time_since_epoch().count();
// do all this sanity checking before we modify the ledger...
// even for a delete operation this will fall through without incident
uint32_t afterTime = currentTime + delay;
if (afterTime < currentTime)
return tefINTERNAL;
AccountID const& id = tx.getAccountID(sfAccount);
auto sle = view.peek(keylet::account(id));
if (!sle)
@@ -239,7 +190,7 @@ SetCron::doApply()
// execution to here means we're creating a new Cron object and adding it to
// the user's owner dir
Keylet klCron = keylet::cron(startTime, id);
Keylet klCron = keylet::cron(afterTime, id);
std::shared_ptr<SLE> sleCron = std::make_shared<SLE>(klCron);
@@ -263,7 +214,6 @@ SetCron::doApply()
adjustOwnerCount(view, sle, 1, j_);
// set the fields
sleCron->setFieldU32(sfStartTime, startTime);
sleCron->setFieldU32(sfDelaySeconds, delay);
sleCron->setFieldU32(sfRepeatCount, recur);
sleCron->setAccountID(sfOwner, id);
@@ -282,18 +232,18 @@ SetCron::calculateBaseFee(ReadView const& view, STTx const& tx)
{
auto const baseFee = Transactor::calculateBaseFee(view, tx);
auto const hasRepeat = tx.isFieldPresent(sfRepeatCount);
if (tx.isFlag(tfCronUnset))
// delete cron
return baseFee;
auto const repeatCount =
tx.isFieldPresent(sfRepeatCount) ? tx.getFieldU32(sfRepeatCount) : 0;
// factor a cost based on the total number of txns expected
// for RepeatCount of 0 we have this txn (SetCron) and the
// single Cron txn (2). For a RepeatCount of 1 we have this txn,
// the first time the cron executes, and the second time (3).
uint32_t const additionalExpectedExecutions = 1 + repeatCount;
uint32_t const additionalExpectedExecutions =
hasRepeat ? tx.getFieldU32(sfRepeatCount) + 1 : 1;
auto const additionalFee = baseFee * additionalExpectedExecutions;
if (baseFee + additionalFee < baseFee)

View File

@@ -23,7 +23,6 @@
#include <ripple/nodestore/Types.h>
#include <atomic>
#include <cstdint>
#include <optional>
namespace ripple {
namespace NodeStore {
@@ -176,14 +175,6 @@ public:
virtual int
fdRequired() const = 0;
/** Get the block size for backends that support it
*/
virtual std::optional<std::size_t>
getBlockSize() const
{
return std::nullopt;
}
/** Returns read and write stats.
@note The Counters struct is specific to and only used

View File

@@ -18,7 +18,6 @@
//==============================================================================
#include <ripple/basics/contract.h>
#include <ripple/beast/core/LexicalCast.h>
#include <ripple/nodestore/Factory.h>
#include <ripple/nodestore/Manager.h>
#include <ripple/nodestore/impl/DecodedBlob.h>
@@ -32,7 +31,6 @@
#include <exception>
#include <memory>
#include <nudb/nudb.hpp>
#include <sstream>
namespace ripple {
namespace NodeStore {
@@ -50,7 +48,6 @@ public:
size_t const keyBytes_;
std::size_t const burstSize_;
std::string const name_;
std::size_t const blockSize_;
nudb::store db_;
std::atomic<bool> deletePath_;
Scheduler& scheduler_;
@@ -65,7 +62,6 @@ public:
, keyBytes_(keyBytes)
, burstSize_(burstSize)
, name_(get(keyValues, "path"))
, blockSize_(parseBlockSize(name_, keyValues, journal))
, deletePath_(false)
, scheduler_(scheduler)
{
@@ -85,7 +81,6 @@ public:
, keyBytes_(keyBytes)
, burstSize_(burstSize)
, name_(get(keyValues, "path"))
, blockSize_(parseBlockSize(name_, keyValues, journal))
, db_(context)
, deletePath_(false)
, scheduler_(scheduler)
@@ -115,12 +110,6 @@ public:
return name_;
}
std::optional<std::size_t>
getBlockSize() const override
{
return blockSize_;
}
void
open(bool createIfMissing, uint64_t appType, uint64_t uid, uint64_t salt)
override
@@ -148,7 +137,7 @@ public:
uid,
salt,
keyBytes_,
blockSize_,
nudb::block_size(kp),
0.50,
ec);
if (ec == nudb::errc::file_exists)
@@ -373,56 +362,6 @@ public:
{
return 3;
}
private:
static std::size_t
parseBlockSize(
std::string const& name,
Section const& keyValues,
beast::Journal journal)
{
using namespace boost::filesystem;
auto const folder = path(name);
auto const kp = (folder / "nudb.key").string();
std::size_t const defaultSize =
nudb::block_size(kp); // Default 4K from NuDB
std::size_t blockSize = defaultSize;
std::string blockSizeStr;
if (!get_if_exists(keyValues, "nudb_block_size", blockSizeStr))
{
return blockSize; // Early return with default
}
try
{
std::size_t const parsedBlockSize =
beast::lexicalCastThrow<std::size_t>(blockSizeStr);
// Validate: must be power of 2 between 4K and 32K
if (parsedBlockSize < 4096 || parsedBlockSize > 32768 ||
(parsedBlockSize & (parsedBlockSize - 1)) != 0)
{
std::stringstream s;
s << "Invalid nudb_block_size: " << parsedBlockSize
<< ". Must be power of 2 between 4096 and 32768.";
Throw<std::runtime_error>(s.str());
}
JLOG(journal.info())
<< "Using custom NuDB block size: " << parsedBlockSize
<< " bytes";
return parsedBlockSize;
}
catch (std::exception const& e)
{
std::stringstream s;
s << "Invalid nudb_block_size value: " << blockSizeStr
<< ". Error: " << e.what();
Throw<std::runtime_error>(s.str());
}
}
};
//------------------------------------------------------------------------------

View File

@@ -412,7 +412,6 @@ extern SF_UINT32 const sfImportSequence;
extern SF_UINT32 const sfXahauActivationLgrSeq;
extern SF_UINT32 const sfDelaySeconds;
extern SF_UINT32 const sfRepeatCount;
extern SF_UINT32 const sfStartTime;
// 64-bit integers (common)
extern SF_UINT64 const sfIndexNext;

View File

@@ -371,7 +371,6 @@ LedgerFormats::LedgerFormats()
ltCRON,
{
{sfOwner, soeREQUIRED},
{sfStartTime, soeREQUIRED},
{sfDelaySeconds, soeREQUIRED},
{sfRepeatCount, soeREQUIRED},
{sfOwnerNode, soeREQUIRED},

View File

@@ -157,7 +157,6 @@ CONSTRUCT_TYPED_SFIELD(sfLockCount, "LockCount", UINT32,
CONSTRUCT_TYPED_SFIELD(sfFirstNFTokenSequence, "FirstNFTokenSequence", UINT32, 50);
CONSTRUCT_TYPED_SFIELD(sfStartTime, "StartTime", UINT32, 93);
CONSTRUCT_TYPED_SFIELD(sfRepeatCount, "RepeatCount", UINT32, 94);
CONSTRUCT_TYPED_SFIELD(sfDelaySeconds, "DelaySeconds", UINT32, 95);
CONSTRUCT_TYPED_SFIELD(sfXahauActivationLgrSeq, "XahauActivationLgrSeq",UINT32, 96);

View File

@@ -486,7 +486,6 @@ TxFormats::TxFormats()
{
{sfDelaySeconds, soeOPTIONAL},
{sfRepeatCount, soeOPTIONAL},
{sfStartTime, soeOPTIONAL},
},
commonFields);
}

View File

@@ -48,13 +48,9 @@ struct Cron_test : public beast::unit_test::suite
auto const expectResult =
withCron ? ter(tesSUCCESS) : ter(temDISABLED);
auto tx = cron::set(alice);
// CLAIM
env(cron::set(alice),
cron::startTime(0),
cron::repeat(100),
cron::delay(100),
fee(XRP(1)),
expectResult);
env(cron::set(alice), cron::delay(100), fee(XRP(1)), expectResult);
env.close();
}
}
@@ -74,10 +70,9 @@ struct Cron_test : public beast::unit_test::suite
env.fund(XRP(1000), alice);
env.close();
// create
// create with RepeatCount
auto expected = baseFee * 2 + baseFee * 256;
env(cron::set(alice),
cron::startTime(0),
cron::delay(356 * 24 * 60 * 60),
cron::repeat(256),
fee(expected - 1),
@@ -85,13 +80,26 @@ struct Cron_test : public beast::unit_test::suite
env.close();
env(cron::set(alice),
cron::startTime(0),
cron::delay(356 * 24 * 60 * 60),
cron::repeat(256),
fee(expected),
ter(tesSUCCESS));
env.close();
// create with no RepeatCount
expected = baseFee * 2;
env(cron::set(alice),
cron::delay(356 * 24 * 60 * 60),
fee(expected - 1),
ter(telINSUF_FEE_P));
env.close();
env(cron::set(alice),
cron::delay(356 * 24 * 60 * 60),
fee(expected),
ter(tesSUCCESS));
env.close();
// delete
expected = baseFee;
env(cron::set(alice),
@@ -135,47 +143,30 @@ struct Cron_test : public beast::unit_test::suite
// temMALFORMED
{
// Invalid DelaySeconds and RepeatCount and StartTime are not
// specified
// Invalid both DelaySeconds and RepeatCount are not specified
env(cron::set(alice), ter(temMALFORMED));
// Invalid DelaySeconds and RepeatCount combination with StartTime
env(cron::set(alice),
cron::startTime(100),
cron::delay(356 * 24 * 60 * 60),
ter(temMALFORMED));
env(cron::set(alice),
cron::startTime(100),
cron::repeat(256),
ter(temMALFORMED));
// Invalid DelaySeconds and RepeatCount combination
// (only RepeatCount specified)
env(cron::set(alice), cron::repeat(256), ter(temMALFORMED));
// Invalid DelaySeconds
env(cron::set(alice),
cron::startTime(100),
cron::delay(365 * 24 * 60 * 60 + 1),
cron::repeat(256),
ter(temMALFORMED));
// Invalid RepeatCount
env(cron::set(alice),
cron::startTime(100),
cron::delay(365 * 24 * 60 * 60),
cron::repeat(257),
ter(temMALFORMED));
// Invalid with tfCronUnset flag
// Invalid tfCronUnset flag
env(cron::set(alice),
cron::delay(365 * 24 * 60 * 60),
txflags(tfCronUnset),
ter(temMALFORMED));
env(cron::set(alice),
cron::repeat(100),
txflags(tfCronUnset),
ter(temMALFORMED));
env(cron::set(alice),
cron::startTime(100),
txflags(tfCronUnset),
ter(temMALFORMED));
}
}
@@ -188,25 +179,9 @@ struct Cron_test : public beast::unit_test::suite
auto const alice = Account("alice");
Env env{*this, features | featureCron};
env.fund(XRP(1000), alice);
env.close();
// Past StartTime
env(cron::set(alice),
cron::startTime(
env.timeKeeper().now().time_since_epoch().count() - 1),
fee(XRP(1)),
ter(tecEXPIRED));
env.close();
// Too far Future StartTime
env(cron::set(alice),
cron::startTime(
env.timeKeeper().now().time_since_epoch().count() +
365 * 24 * 60 * 60 + 1),
fee(XRP(1)),
ter(tecEXPIRED));
env.close();
// there is no check in preclaim
BEAST_EXPECT(true);
}
void
@@ -224,10 +199,7 @@ struct Cron_test : public beast::unit_test::suite
auto const aliceOwnerCount = ownerCount(env, alice);
// create cron
auto parentCloseTime =
env.current()->parentCloseTime().time_since_epoch().count();
env(cron::set(alice),
cron::startTime(parentCloseTime + 356 * 24 * 60 * 60),
cron::delay(356 * 24 * 60 * 60),
cron::repeat(256),
fee(XRP(1)),
@@ -247,15 +219,9 @@ struct Cron_test : public beast::unit_test::suite
BEAST_EXPECT(
cronSle->getFieldU32(sfDelaySeconds) == 356 * 24 * 60 * 60);
BEAST_EXPECT(cronSle->getFieldU32(sfRepeatCount) == 256);
BEAST_EXPECT(
cronSle->getFieldU32(sfStartTime) ==
parentCloseTime + 356 * 24 * 60 * 60);
// update cron
parentCloseTime =
env.current()->parentCloseTime().time_since_epoch().count();
env(cron::set(alice),
cron::startTime(0),
cron::delay(100),
cron::repeat(10),
fee(XRP(1)),
@@ -277,7 +243,6 @@ struct Cron_test : public beast::unit_test::suite
BEAST_EXPECT(cronSle2);
BEAST_EXPECT(cronSle2->getFieldU32(sfDelaySeconds) == 100);
BEAST_EXPECT(cronSle2->getFieldU32(sfRepeatCount) == 10);
BEAST_EXPECT(cronSle2->getFieldU32(sfStartTime) == parentCloseTime);
// delete cron
env(cron::set(alice),
@@ -324,7 +289,6 @@ struct Cron_test : public beast::unit_test::suite
auto repeatCount = 10;
env(cron::set(alice),
cron::startTime(baseTime + 100),
cron::delay(100),
cron::repeat(repeatCount),
fee(XRP(1)));
@@ -347,7 +311,7 @@ struct Cron_test : public beast::unit_test::suite
}
// close after 100 seconds passed
env.close(10s);
env.close();
auto txns = env.closed()->txs;
auto size = std::distance(txns.begin(), txns.end());
@@ -384,7 +348,8 @@ struct Cron_test : public beast::unit_test::suite
cronSle->getAccountID(sfOwner) == alice.id());
// set new base time
baseTime = baseTime + 100;
baseTime =
env.timeKeeper().now().time_since_epoch().count();
lastCronKeylet = cronKeylet;
}
else
@@ -415,7 +380,7 @@ struct Cron_test : public beast::unit_test::suite
for (auto const& account : accounts)
{
env(cron::set(account), cron::startTime(0), fee(XRP(1)));
env(cron::set(account), cron::delay(0), fee(XRP(1)));
}
env.close();

View File

@@ -6297,7 +6297,6 @@ private:
// cron set
env(cron::set(account),
cron::startTime(0),
cron::delay(100),
cron::repeat(1),
fee(XRP(1)),
@@ -6333,11 +6332,8 @@ private:
env.fund(XRP(1000), account);
env.close();
auto const baseTime =
env.current()->parentCloseTime().time_since_epoch().count();
// cron set
env(cron::set(account),
cron::startTime(baseTime + 100),
cron::delay(100),
cron::repeat(1),
fee(XRP(1)),

View File

@@ -34,21 +34,6 @@ namespace cron {
Json::Value
set(jtx::Account const& account);
/** Sets the optional StartTime on a JTx. */
class startTime
{
private:
uint32_t startTime_;
public:
explicit startTime(uint32_t startTime) : startTime_(startTime)
{
}
void
operator()(Env&, JTx& jtx) const;
};
/** Sets the optional DelaySeconds on a JTx. */
class delay
{

View File

@@ -37,12 +37,6 @@ set(jtx::Account const& account)
return jv;
}
void
startTime::operator()(Env& env, JTx& jt) const
{
jt.jv[sfStartTime.jsonName] = startTime_;
}
void
delay::operator()(Env& env, JTx& jt) const
{

View File

@@ -1,357 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <test/nodestore/TestBase.h>
#include <test/unit_test/SuiteJournal.h>
#include <ripple/basics/BasicConfig.h>
#include <ripple/basics/ByteUtilities.h>
#include <ripple/beast/utility/temp_dir.h>
#include <ripple/nodestore/DummyScheduler.h>
#include <ripple/nodestore/Manager.h>
#include <memory>
#include <sstream>
namespace ripple {
namespace NodeStore {
class NuDBFactory_test : public TestBase
{
private:
// Helper function to create a Section with specified parameters
Section
createSection(std::string const& path, std::string const& blockSize = "")
{
Section params;
params.set("type", "nudb");
params.set("path", path);
if (!blockSize.empty())
params.set("nudb_block_size", blockSize);
return params;
}
// Helper function to create a backend and test basic functionality
bool
testBackendFunctionality(
Section const& params,
std::size_t expectedBlocksize)
{
try
{
DummyScheduler scheduler;
test::SuiteJournal journal("NuDBFactory_test", *this);
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
if (!BEAST_EXPECT(backend))
return false;
if (!BEAST_EXPECT(backend->getBlockSize() == expectedBlocksize))
return false;
backend->open();
if (!BEAST_EXPECT(backend->isOpen()))
return false;
// Test basic store/fetch functionality
auto batch = createPredictableBatch(10, 12345);
storeBatch(*backend, batch);
Batch copy;
fetchCopyOfBatch(*backend, &copy, batch);
backend->close();
return areBatchesEqual(batch, copy);
}
catch (...)
{
return false;
}
}
// Helper function to test log messages
void
testLogMessage(
Section const& params,
beast::severities::Severity level,
std::string const& expectedMessage)
{
test::StreamSink sink(level);
beast::Journal journal(sink);
DummyScheduler scheduler;
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
std::string logOutput = sink.messages().str();
BEAST_EXPECT(logOutput.find(expectedMessage) != std::string::npos);
}
public:
void
testDefaultBlockSize()
{
testcase("Default block size (no nudb_block_size specified)");
beast::temp_dir tempDir;
auto params = createSection(tempDir.path());
// Should work with default 4096 block size
BEAST_EXPECT(testBackendFunctionality(params, 4096));
}
void
testValidBlockSizes()
{
testcase("Valid block sizes");
std::vector<std::size_t> validSizes = {4096, 8192, 16384, 32768};
for (auto const& size : validSizes)
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), to_string(size));
BEAST_EXPECT(testBackendFunctionality(params, size));
}
}
void
testInvalidBlockSizes()
{
testcase("Invalid block sizes");
std::vector<std::string> invalidSizes = {
"2048", // Too small
"1024", // Too small
"65536", // Too large
"131072", // Too large
"5000", // Not power of 2
"6000", // Not power of 2
"10000", // Not power of 2
"0", // Zero
"-1", // Negative
"abc", // Non-numeric
"4k", // Invalid format
"4096.5" // Decimal
};
for (auto const& size : invalidSizes)
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), size);
DummyScheduler scheduler;
test::SuiteJournal journal("NuDBFactory_test", *this);
// Should throw exception for invalid sizes
try
{
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
// If we get here, the test failed
BEAST_EXPECT(false);
}
catch (std::exception const& e)
{
// Expected exception
std::string error{e.what()};
BEAST_EXPECT(
error.find("Invalid nudb_block_size") != std::string::npos);
}
}
}
void
testLogMessages()
{
testcase("Log message verification");
// Test valid custom block size logging
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), "8192");
testLogMessage(
params,
beast::severities::kInfo,
"Using custom NuDB block size: 8192");
}
// Test invalid block size exception message
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), "5000");
test::StreamSink sink(beast::severities::kWarning);
beast::Journal journal(sink);
DummyScheduler scheduler;
try
{
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
fail();
}
catch (std::exception const& e)
{
std::string logOutput{e.what()};
BEAST_EXPECT(
logOutput.find("Invalid nudb_block_size: 5000") !=
std::string::npos);
BEAST_EXPECT(
logOutput.find(
"Must be power of 2 between 4096 and 32768") !=
std::string::npos);
}
}
// Test non-numeric value exception message
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), "invalid");
test::StreamSink sink(beast::severities::kWarning);
beast::Journal journal(sink);
DummyScheduler scheduler;
try
{
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
fail();
}
catch (std::exception const& e)
{
std::string logOutput{e.what()};
BEAST_EXPECT(
logOutput.find("Invalid nudb_block_size value: invalid") !=
std::string::npos);
}
}
}
void
testPowerOfTwoValidation()
{
testcase("Power of 2 validation logic");
// Test edge cases around valid range
std::vector<std::pair<std::string, bool>> testCases = {
{"4095", false}, // Just below minimum
{"4096", true}, // Minimum valid
{"4097", false}, // Just above minimum, not power of 2
{"8192", true}, // Valid power of 2
{"8193", false}, // Just above valid power of 2
{"16384", true}, // Valid power of 2
{"32768", true}, // Maximum valid
{"32769", false}, // Just above maximum
{"65536", false} // Power of 2 but too large
};
for (auto const& [size, shouldWork] : testCases)
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), size);
test::StreamSink sink(beast::severities::kWarning);
beast::Journal journal(sink);
DummyScheduler scheduler;
try
{
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
BEAST_EXPECT(shouldWork);
}
catch (std::exception const& e)
{
std::string logOutput{e.what()};
BEAST_EXPECT(
logOutput.find("Invalid nudb_block_size") !=
std::string::npos);
BEAST_EXPECT(!shouldWork);
}
}
}
void
testDataPersistence()
{
testcase("Data persistence with different block sizes");
std::vector<std::string> blockSizes = {
"4096", "8192", "16384", "32768"};
for (auto const& size : blockSizes)
{
beast::temp_dir tempDir;
auto params = createSection(tempDir.path(), size);
DummyScheduler scheduler;
test::SuiteJournal journal("NuDBFactory_test", *this);
// Create test data
auto batch = createPredictableBatch(50, 54321);
// Store data
{
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
backend->open();
storeBatch(*backend, batch);
backend->close();
}
// Retrieve data in new backend instance
{
auto backend = Manager::instance().make_Backend(
params, megabytes(4), scheduler, journal);
backend->open();
Batch copy;
fetchCopyOfBatch(*backend, &copy, batch);
BEAST_EXPECT(areBatchesEqual(batch, copy));
backend->close();
}
}
}
void
run() override
{
testDefaultBlockSize();
testValidBlockSizes();
testInvalidBlockSizes();
testLogMessages();
testPowerOfTwoValidation();
testDataPersistence();
}
};
BEAST_DEFINE_TESTSUITE(NuDBFactory, ripple_core, ripple);
} // namespace NodeStore
} // namespace ripple