Mirror of https://github.com/Xahau/xahaud.git (synced 2025-11-24 12:35:50 +00:00)

Compare commits: nudb-block...cronjob (24 commits)
| SHA1 |
|---|
| ccf3b38fad |
| 86a9ea999f |
| 0755fb186a |
| 526154b97a |
| 052135a800 |
| 793249d031 |
| c1e011a16a |
| caa486f19c |
| 106abfa9e0 |
| 88828bbf63 |
| cf3db6eb42 |
| 9b1f3ebdd7 |
| e64692fc8b |
| ac1bf88596 |
| 0d7dd0597d |
| 3dddb907c2 |
| 9d6ea9ac60 |
| e9a414cff2 |
| fe1b424bea |
| b1c366761f |
| 82af6d9eee |
| 20e6e62660 |
| 1d6066127c |
| d3d5c757fe |
@@ -965,7 +965,6 @@ if (tests)
   src/test/nodestore/Basics_test.cpp
   src/test/nodestore/DatabaseShard_test.cpp
   src/test/nodestore/Database_test.cpp
-  src/test/nodestore/NuDBFactory_test.cpp
   src/test/nodestore/Timing_test.cpp
   src/test/nodestore/import_test.cpp
   src/test/nodestore/varint_test.cpp
@@ -176,10 +176,9 @@ existing maintainer without a vote.
 
 ## Current Maintainers
 
-* [Richard Holland](https://github.com/RichardAH) (XRPL Labs + INFTF)
-* [Denis Angell](https://github.com/dangell7) (XRPL Labs + INFTF)
-* [Wietse Wind](https://github.com/WietseWind) (XRPL Labs + INFTF)
-* [tequ](https://github.com/tequdev) (Independent + INFTF)
+* [Richard Holland](https://github.com/RichardAH) (XRPL Labs + XRP Ledger Foundation)
+* [Denis Angell](https://github.com/dangell7) (XRPL Labs + XRP Ledger Foundation)
+* [Wietse Wind](https://github.com/WietseWind) (XRPL Labs + XRP Ledger Foundation)
 
 
 [1]: https://docs.github.com/en/get-started/quickstart/contributing-to-projects
@@ -1127,39 +1127,6 @@
 # it must be defined with the same value in both
 # sections.
 #
-# Optional keys for NuDB only:
-#
-# nudb_block_size EXPERIMENTAL: Block size in bytes for NuDB storage.
-# Must be a power of 2 between 4096 and 32768. Default is 4096.
-#
-# This parameter controls the fundamental storage unit
-# size for NuDB's internal data structures. The choice
-# of block size can significantly impact performance
-# depending on your storage hardware and filesystem:
-#
-# - 4096 bytes: Optimal for most standard SSDs and
-#   traditional filesystems (ext4, NTFS, HFS+).
-#   Provides good balance of performance and storage
-#   efficiency. Recommended for most deployments.
-#
-# - 8192-16384 bytes: May improve performance on
-#   high-end NVMe SSDs and copy-on-write filesystems
-#   like ZFS or Btrfs that benefit from larger block
-#   alignment. Can reduce metadata overhead for large
-#   databases.
-#
-# - 32768 bytes (32K): Maximum supported block size
-#   for high-performance scenarios with very fast
-#   storage. May increase memory usage and reduce
-#   efficiency for smaller databases.
-#
-# Note: This setting cannot be changed after database
-#   creation without rebuilding the entire database.
-#   Choose carefully based on your hardware and expected
-#   database size.
-#
-# Example: nudb_block_size=4096
-#
 
 # These keys modify the behavior of online_delete, and thus are only
 # relevant if online_delete is defined and non-zero:
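For reference, the removed documentation describes a key that would sit inside a rippled-style [node_db] stanza. A minimal sketch of such a stanza, with illustrative values only (the path shown is not taken from this diff):

```
[node_db]
type=NuDB
path=/opt/xahaud/db/nudb
nudb_block_size=4096
```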
@@ -62,9 +62,6 @@
 #define sfEmitGeneration ((2U << 16U) + 46U)
 #define sfLockCount ((2U << 16U) + 49U)
 #define sfFirstNFTokenSequence ((2U << 16U) + 50U)
-#define sfStartTime ((2U << 16U) + 93U)
-#define sfRepeatCount ((2U << 16U) + 94U)
-#define sfDelaySeconds ((2U << 16U) + 95U)
 #define sfXahauActivationLgrSeq ((2U << 16U) + 96U)
 #define sfImportSequence ((2U << 16U) + 97U)
 #define sfRewardTime ((2U << 16U) + 98U)

@@ -132,7 +129,6 @@
 #define sfGovernanceFlags ((5U << 16U) + 99U)
 #define sfGovernanceMarks ((5U << 16U) + 98U)
 #define sfEmittedTxnID ((5U << 16U) + 97U)
-#define sfCron ((5U << 16U) + 95U)
 #define sfAmount ((6U << 16U) + 1U)
 #define sfBalance ((6U << 16U) + 2U)
 #define sfLimitAmount ((6U << 16U) + 3U)

@@ -31,8 +31,6 @@
 #define ttURITOKEN_BUY 47
 #define ttURITOKEN_CREATE_SELL_OFFER 48
 #define ttURITOKEN_CANCEL_SELL_OFFER 49
-#define ttCRON 92
-#define ttCRON_SET 93
 #define ttREMIT 95
 #define ttGENESIS_MINT 96
 #define ttIMPORT 97
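The numeric values in the defines above follow the serialized-field convention of packing a type id into the high 16 bits and a field code into the low 16 bits (UINT32 fields use type 2, for example). A small standalone illustration of that encoding, not code from the repository:

```cpp
#include <cstdint>

// Illustrative only: shows how the macro values above are composed.
constexpr std::uint32_t
packSField(std::uint32_t typeId, std::uint32_t fieldCode)
{
    return (typeId << 16u) + fieldCode;
}

static_assert(packSField(2u, 94u) == ((2u << 16u) + 94u));  // e.g. sfRepeatCount
static_assert(packSField(6u, 1u) == ((6u << 16u) + 1u));    // e.g. sfAmount
```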
@@ -94,6 +94,21 @@ Change::preflight(PreflightContext const& ctx)
                "of sfImportVLKey, sfActiveValidator";
            return temMALFORMED;
        }
+
+        // if we do specify import_vl_keys in config then we won't approve keys
+        // that aren't on our list
+        if (ctx.tx.isFieldPresent(sfImportVLKey) &&
+            !ctx.app.config().IMPORT_VL_KEYS.empty())
+        {
+            auto const& inner = const_cast<ripple::STTx&>(ctx.tx)
+                                    .getField(sfImportVLKey)
+                                    .downcast<STObject>();
+            auto const pk = inner.getFieldVL(sfPublicKey);
+            std::string const strPk = strHex(makeSlice(pk));
+            if (ctx.app.config().IMPORT_VL_KEYS.find(strPk) ==
+                ctx.app.config().IMPORT_VL_KEYS.end())
+                return telIMPORT_VL_KEY_NOT_RECOGNISED;
+        }
    }

    return tesSUCCESS;

@@ -153,42 +168,9 @@ Change::preclaim(PreclaimContext const& ctx)
            return tesSUCCESS;
        case ttAMENDMENT:
        case ttUNL_MODIFY:
+       case ttUNL_REPORT:
        case ttEMIT_FAILURE:
            return tesSUCCESS;
-       case ttUNL_REPORT: {
-           if (!ctx.tx.isFieldPresent(sfImportVLKey) ||
-               ctx.app.config().IMPORT_VL_KEYS.empty())
-               return tesSUCCESS;
-
-           // if we do specify import_vl_keys in config then we won't approve
-           // keys that aren't on our list and/or aren't in the ledger object
-           auto const& inner = const_cast<ripple::STTx&>(ctx.tx)
-                                   .getField(sfImportVLKey)
-                                   .downcast<STObject>();
-           auto const pkBlob = inner.getFieldVL(sfPublicKey);
-           std::string const strPk = strHex(makeSlice(pkBlob));
-           if (ctx.app.config().IMPORT_VL_KEYS.find(strPk) !=
-               ctx.app.config().IMPORT_VL_KEYS.end())
-               return tesSUCCESS;
-
-           auto const pkType = publicKeyType(makeSlice(pkBlob));
-           if (!pkType)
-               return tefINTERNAL;
-
-           PublicKey const pk(makeSlice(pkBlob));
-
-           // check on ledger
-           if (auto const unlRep = ctx.view.read(keylet::UNLReport());
-               unlRep && unlRep->isFieldPresent(sfImportVLKeys))
-           {
-               auto const& vlKeys = unlRep->getFieldArray(sfImportVLKeys);
-               for (auto const& k : vlKeys)
-                   if (PublicKey(k[sfPublicKey]) == pk)
-                       return tesSUCCESS;
-           }
-
-           return telIMPORT_VL_KEY_NOT_RECOGNISED;
-       }
        default:
            return temUNKNOWN;
    }
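Both the preflight and preclaim versions above gate an ImportVLKey on whether its hex encoding appears in the node's configured IMPORT_VL_KEYS set. A hedged, standalone sketch of that lookup (the container type and empty-set behaviour are simplified from the diff; names are illustrative, not the project's actual types):

```cpp
#include <set>
#include <string>

// Sketch only: mirrors the "is this key on our configured list" check above.
// In the diff, an empty configured set means the check is skipped entirely.
bool
isRecognisedImportVLKey(
    std::set<std::string> const& configuredKeysHex,  // stand-in for IMPORT_VL_KEYS
    std::string const& txKeyHex)                     // strHex(makeSlice(pk))
{
    if (configuredKeysHex.empty())
        return true;  // no list configured: nothing to reject against
    return configuredKeysHex.find(txKeyHex) != configuredKeysHex.end();
}
```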
@@ -121,11 +121,11 @@ Cron::doApply()
    uint32_t delay = sleCron->getFieldU32(sfDelaySeconds);
    uint32_t recur = sleCron->getFieldU32(sfRepeatCount);

-   uint32_t lastStartTime = sleCron->getFieldU32(sfStartTime);
+   uint32_t currentTime = view.parentCloseTime().time_since_epoch().count();

    // do all this sanity checking before we modify the ledger...
-   uint32_t afterTime = lastStartTime + delay;
-   if (afterTime < lastStartTime)
+   uint32_t afterTime = currentTime + delay;
+   if (afterTime < currentTime)
        return tefINTERNAL;

    // in all circumstances the Cron object is deleted...

@@ -163,7 +163,6 @@ Cron::doApply()
    sleCron->setFieldU64(sfOwnerNode, *page);
    sleCron->setFieldU32(sfDelaySeconds, delay);
    sleCron->setFieldU32(sfRepeatCount, recur - 1);
-   sleCron->setFieldU32(sfStartTime, afterTime);
    sleCron->setAccountID(sfOwner, id);

    sle->setFieldH256(sfCron, klCron.key);
@@ -51,74 +51,80 @@ SetCron::preflight(PreflightContext const& ctx)
        return temINVALID_FLAG;
    }

-   // DelaySeconds (D), RepeatCount (R), StartTime (S)
-   // DRS - Set Cron with Delay and Repeat and StartTime
-   // DR- - Invalid(StartTime is required)
-   // D-S - Invalid (both DelaySeconds and RepeatCount are required)
-   // -RS - Invalid (both DelaySeconds and RepeatCount are required)
-   // --S - Onetime cron with StartTime only
-   // --  - Clear any existing cron (succeeds even if there isn't one) / with
+   // StartAfter(s) DelaySeconds (D), RepeatCount (R)
+   // SDR - Set Cron with After, Delay and Repeat
+   // SD- - Invalid, if repeat count isn't included then only start or delay
+   // S-R - Invalid
+   // S-- - Set Cron with After for a once off execution
+   // -DR - Set Cron with Delay and Repeat
+   // -D- - Set Cron (once off) with Delay only (repat implicitly 0)
+   // --R - Invalid
+   // --- - Clear any existing cron (succeeds even if there isn't one) / with
    //       tfCronUnset flag set

+   bool const hasStart = tx.isFieldPresent(sfStartAfter);
    bool const hasDelay = tx.isFieldPresent(sfDelaySeconds);
    bool const hasRepeat = tx.isFieldPresent(sfRepeatCount);
-   bool const hasStartTime = tx.isFieldPresent(sfStartTime);

+   // unset is a special case, handle first
    if (tx.isFlag(tfCronUnset))
    {
-       // delete operation
-       if (hasDelay || hasRepeat || hasStartTime)
+       if (hasDelay || hasRepeat || hasStart)
        {
            JLOG(j.debug()) << "SetCron: tfCronUnset flag cannot be used with "
-                              "DelaySeconds, RepeatCount or StartTime.";
+                              "DelaySeconds or RepeatCount.";
            return temMALFORMED;
        }
+
+       return preflight2(ctx);
+   }
+
+   if (hasStart)
+   {
+       if (hasRepeat && hasDelay)
+       {
+           // valid, this is a fully specified cron
+           // fall through to validate other fields
+       }
+       else if (!hasRepeat && !hasDelay)
+       {
+           // valid this is a once off cron
+           // no other fields to validate, done
+           return preflight2(ctx);
        }
        else
        {
-           // create operation
-           if (!hasStartTime)
-           {
-               JLOG(j.debug())
-                   << "SetCron: StartTime is required. Use StartTime=0 for "
-                      "immediate execution, or specify a future timestamp.";
-               return temMALFORMED;
-           }
+           // invalid, must specify both or neither repeat and delay count with
+           // startafter
+           JLOG(j.debug()) << "SetCron: StartAfter can only be used with "
+                              "either both or neither of "
+                              "DelaySeconds and RepeatCount.";
+           return temMALFORMED;
        }
+   }

-       if ((!hasDelay && hasRepeat) || (hasDelay && !hasRepeat))
-       {
-           JLOG(j.debug())
-               << "SetCron: DelaySeconds and RepeatCount must both be present "
-                  "for recurring crons, or both absent for one-off crons.";
-           return temMALFORMED;
-       }
+   if (!hasDelay)
+   {
+       JLOG(j.debug()) << "SetCron: DelaySeconds or StartAfter must be "
+                          "specified to create a cron.";
+       return temMALFORMED;
+   }

    // check delay is not too high
-   if (hasDelay)
-   {
    auto delay = tx.getFieldU32(sfDelaySeconds);
    if (delay > 31536000UL /* 365 days in seconds */)
    {
-       JLOG(j.debug())
-           << "SetCron: DelaySeconds was too high. (max 365 "
+       JLOG(j.debug()) << "SetCron: DelaySeconds was too high. (max 365 "
              "days in seconds).";
        return temMALFORMED;
    }
-   }

    // check repeat is not too high
    if (hasRepeat)
    {
        auto recur = tx.getFieldU32(sfRepeatCount);
-       if (recur == 0)
-       {
-           JLOG(j.debug())
-               << "SetCron: RepeatCount must be greater than 0."
-                  "For one-time execution, omit DelaySeconds and "
-                  "RepeatCount.";
-           return temMALFORMED;
-       }
        if (recur > 256)
        {
            JLOG(j.debug())

@@ -127,7 +133,6 @@ SetCron::preflight(PreflightContext const& ctx)
            return temMALFORMED;
        }
    }
-   }

    return preflight2(ctx);
 }
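The comment matrix in the hunk above maps directly onto the jtx helpers exercised later in Cron_test.cpp. A sketch of the accepted and rejected shapes, assuming an initialised test Env named env and a funded Account alice; only helpers that appear elsewhere in this diff are used (no StartAfter helper is shown in these hunks):

```cpp
// Recurring cron: DelaySeconds + RepeatCount ("-DR" in the matrix above).
env(cron::set(alice), cron::delay(100), cron::repeat(10), fee(XRP(1)));

// One-off cron: DelaySeconds only ("-D-"); RepeatCount is implicitly 0.
env(cron::set(alice), cron::delay(100), fee(XRP(1)));

// Clear any existing cron ("---", with the tfCronUnset flag).
env(cron::set(alice), txflags(tfCronUnset), fee(XRP(1)));

// RepeatCount without DelaySeconds ("--R") is rejected in preflight.
env(cron::set(alice), cron::repeat(10), ter(temMALFORMED));
```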
@@ -135,28 +140,30 @@ SetCron::preflight(PreflightContext const& ctx)
 TER
 SetCron::preclaim(PreclaimContext const& ctx)
 {
-    if (ctx.tx.isFieldPresent(sfStartTime) &&
-        ctx.tx.getFieldU32(sfStartTime) != 0)
+    if (ctx.tx.isFieldPresent(sfStartAfter))
    {
-        // StartTime 0 means the cron will execute immediately
-
-        auto const startTime = ctx.tx.getFieldU32(sfStartTime);
-        auto const parentCloseTime =
+        uint32_t currentTime =
            ctx.view.parentCloseTime().time_since_epoch().count();
+        uint32_t afterTime = ctx.tx.getFieldU32(sfStartAfter);

-        if (startTime < parentCloseTime)
+        if (afterTime <= currentTime)
        {
-            JLOG(ctx.j.debug()) << "SetCron: StartTime must be in the future "
-                                   "(or 0 for immediate execution)";
-            return tecEXPIRED;
+            // we'll pass this as though they meant execute asap, similar to a
+            // delay of 0
+            return tesSUCCESS;
        }

-        if (startTime > ctx.view.parentCloseTime().time_since_epoch().count() +
-                365 * 24 * 60 * 60)
+        uint32_t waitSeconds = afterTime - currentTime;
+
+        if (waitSeconds > afterTime)
+            return tefINTERNAL;
+
+        if (waitSeconds >> 31536000UL /* 365 days in seconds */)
        {
-            JLOG(ctx.j.debug()) << "SetCron: StartTime is too far in the "
-                                   "future (max 365 days).";
-            return tecEXPIRED;
+            JLOG(ctx.j.debug())
+                << "SetCron: DelaySeconds was too high. (max 365 "
+                   "days in seconds).";
+            return tecSTART_AFTER_TOO_HIGH;
        }
    }
    return tesSUCCESS;
@@ -174,7 +181,7 @@ SetCron::doApply()
    // ledger.
    uint32_t delay{0};
    uint32_t recur{0};
-   uint32_t startTime{0};
+   uint32_t after{0};

    if (!isDelete)
    {

@@ -182,14 +189,20 @@ SetCron::doApply()
        delay = tx.getFieldU32(sfDelaySeconds);
        if (tx.isFieldPresent(sfRepeatCount))
            recur = tx.getFieldU32(sfRepeatCount);
-       if (tx.isFieldPresent(sfStartTime))
-       {
-           startTime = tx.getFieldU32(sfStartTime);
-           if (startTime == 0)
-               startTime = view.parentCloseTime().time_since_epoch().count();
-       }
    }

+   uint32_t currentTime = view.parentCloseTime().time_since_epoch().count();
+
+   // do all this sanity checking before we modify the ledger...
+   // even for a delete operation this will fall through without incident
+
+   uint32_t afterTime = tx.isFieldPresent(sfStartAfter)
+       ? tx.getFieldU32(sfStartAfter)
+       : currentTime + delay;
+
+   if (afterTime < currentTime)
+       return tefINTERNAL;
+
    AccountID const& id = tx.getAccountID(sfAccount);
    auto sle = view.peek(keylet::account(id));
    if (!sle)

@@ -239,7 +252,7 @@ SetCron::doApply()
    // execution to here means we're creating a new Cron object and adding it to
    // the user's owner dir

-   Keylet klCron = keylet::cron(startTime, id);
+   Keylet klCron = keylet::cron(afterTime, id);

    std::shared_ptr<SLE> sleCron = std::make_shared<SLE>(klCron);


@@ -263,7 +276,6 @@ SetCron::doApply()
    adjustOwnerCount(view, sle, 1, j_);

    // set the fields
-   sleCron->setFieldU32(sfStartTime, startTime);
    sleCron->setFieldU32(sfDelaySeconds, delay);
    sleCron->setFieldU32(sfRepeatCount, recur);
    sleCron->setAccountID(sfOwner, id);
@@ -282,18 +294,18 @@ SetCron::calculateBaseFee(ReadView const& view, STTx const& tx)
 {
    auto const baseFee = Transactor::calculateBaseFee(view, tx);

+   auto const hasRepeat = tx.isFieldPresent(sfRepeatCount);
+
    if (tx.isFlag(tfCronUnset))
        // delete cron
        return baseFee;

-   auto const repeatCount =
-       tx.isFieldPresent(sfRepeatCount) ? tx.getFieldU32(sfRepeatCount) : 0;
-
    // factor a cost based on the total number of txns expected
    // for RepeatCount of 0 we have this txn (SetCron) and the
    // single Cron txn (2). For a RepeatCount of 1 we have this txn,
    // the first time the cron executes, and the second time (3).
-   uint32_t const additionalExpectedExecutions = 1 + repeatCount;
+   uint32_t const additionalExpectedExecutions =
+       hasRepeat ? tx.getFieldU32(sfRepeatCount) + 1 : 1;
    auto const additionalFee = baseFee * additionalExpectedExecutions;

    if (baseFee + additionalFee < baseFee)
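The fee comment above works out to the base fee plus one base fee for every expected cron execution. A worked check of that arithmetic with illustrative numbers (not values from the diff; an absent RepeatCount behaves like 0 here):

```cpp
#include <cstdint>

// Mirrors the additionalExpectedExecutions arithmetic above: a SetCron with
// RepeatCount R pays for this transaction plus R + 1 expected Cron executions.
constexpr std::uint64_t
expectedSetCronFee(std::uint64_t baseFee, std::uint32_t repeatCount)
{
    std::uint32_t const additionalExpectedExecutions = repeatCount + 1;
    return baseFee + baseFee * additionalExpectedExecutions;
}

static_assert(expectedSetCronFee(10, 0) == 20);      // one-off: SetCron + one Cron txn
static_assert(expectedSetCronFee(10, 256) == 2580);  // matches baseFee * 2 + baseFee * 256 in Cron_test
```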
@@ -23,7 +23,6 @@
 #include <ripple/nodestore/Types.h>
 #include <atomic>
 #include <cstdint>
-#include <optional>

 namespace ripple {
 namespace NodeStore {

@@ -176,14 +175,6 @@ public:
    virtual int
    fdRequired() const = 0;

-   /** Get the block size for backends that support it
-    */
-   virtual std::optional<std::size_t>
-   getBlockSize() const
-   {
-       return std::nullopt;
-   }
-
    /** Returns read and write stats.

        @note The Counters struct is specific to and only used
@@ -18,7 +18,6 @@
 //==============================================================================

 #include <ripple/basics/contract.h>
-#include <ripple/beast/core/LexicalCast.h>
 #include <ripple/nodestore/Factory.h>
 #include <ripple/nodestore/Manager.h>
 #include <ripple/nodestore/impl/DecodedBlob.h>

@@ -32,7 +31,6 @@
 #include <exception>
 #include <memory>
 #include <nudb/nudb.hpp>
-#include <sstream>

 namespace ripple {
 namespace NodeStore {

@@ -50,7 +48,6 @@ public:
    size_t const keyBytes_;
    std::size_t const burstSize_;
    std::string const name_;
-   std::size_t const blockSize_;
    nudb::store db_;
    std::atomic<bool> deletePath_;
    Scheduler& scheduler_;

@@ -65,7 +62,6 @@ public:
        , keyBytes_(keyBytes)
        , burstSize_(burstSize)
        , name_(get(keyValues, "path"))
-       , blockSize_(parseBlockSize(name_, keyValues, journal))
        , deletePath_(false)
        , scheduler_(scheduler)
    {

@@ -85,7 +81,6 @@ public:
        , keyBytes_(keyBytes)
        , burstSize_(burstSize)
        , name_(get(keyValues, "path"))
-       , blockSize_(parseBlockSize(name_, keyValues, journal))
        , db_(context)
        , deletePath_(false)
        , scheduler_(scheduler)

@@ -115,12 +110,6 @@ public:
        return name_;
    }

-   std::optional<std::size_t>
-   getBlockSize() const override
-   {
-       return blockSize_;
-   }
-
    void
    open(bool createIfMissing, uint64_t appType, uint64_t uid, uint64_t salt)
        override

@@ -148,7 +137,7 @@ public:
            uid,
            salt,
            keyBytes_,
-           blockSize_,
+           nudb::block_size(kp),
            0.50,
            ec);
        if (ec == nudb::errc::file_exists)

@@ -373,56 +362,6 @@ public:
    {
        return 3;
    }

-private:
-   static std::size_t
-   parseBlockSize(
-       std::string const& name,
-       Section const& keyValues,
-       beast::Journal journal)
-   {
-       using namespace boost::filesystem;
-       auto const folder = path(name);
-       auto const kp = (folder / "nudb.key").string();
-
-       std::size_t const defaultSize =
-           nudb::block_size(kp); // Default 4K from NuDB
-       std::size_t blockSize = defaultSize;
-       std::string blockSizeStr;
-
-       if (!get_if_exists(keyValues, "nudb_block_size", blockSizeStr))
-       {
-           return blockSize; // Early return with default
-       }
-
-       try
-       {
-           std::size_t const parsedBlockSize =
-               beast::lexicalCastThrow<std::size_t>(blockSizeStr);
-
-           // Validate: must be power of 2 between 4K and 32K
-           if (parsedBlockSize < 4096 || parsedBlockSize > 32768 ||
-               (parsedBlockSize & (parsedBlockSize - 1)) != 0)
-           {
-               std::stringstream s;
-               s << "Invalid nudb_block_size: " << parsedBlockSize
-                 << ". Must be power of 2 between 4096 and 32768.";
-               Throw<std::runtime_error>(s.str());
-           }
-
-           JLOG(journal.info())
-               << "Using custom NuDB block size: " << parsedBlockSize
-               << " bytes";
-           return parsedBlockSize;
-       }
-       catch (std::exception const& e)
-       {
-           std::stringstream s;
-           s << "Invalid nudb_block_size value: " << blockSizeStr
-             << ". Error: " << e.what();
-           Throw<std::runtime_error>(s.str());
-       }
-   }
 };

 //------------------------------------------------------------------------------
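The removed parseBlockSize helper accepts a block size only when it is a power of two between 4096 and 32768, using the usual (n & (n - 1)) trick. The same rule in isolation:

```cpp
#include <cstddef>

// Standalone restatement of the validation rule in parseBlockSize above.
constexpr bool
isValidNuDBBlockSize(std::size_t n)
{
    return n >= 4096 && n <= 32768 && (n & (n - 1)) == 0;
}

static_assert(isValidNuDBBlockSize(4096));
static_assert(isValidNuDBBlockSize(32768));
static_assert(!isValidNuDBBlockSize(5000));   // not a power of two
static_assert(!isValidNuDBBlockSize(65536));  // power of two but too large
```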
@@ -412,7 +412,7 @@ extern SF_UINT32 const sfImportSequence;
 extern SF_UINT32 const sfXahauActivationLgrSeq;
 extern SF_UINT32 const sfDelaySeconds;
 extern SF_UINT32 const sfRepeatCount;
-extern SF_UINT32 const sfStartTime;
+extern SF_UINT32 const sfStartAfter;

 // 64-bit integers (common)
 extern SF_UINT64 const sfIndexNext;

@@ -343,6 +343,7 @@ enum TECcodes : TERUnderlyingType {
    tecINSUF_RESERVE_SELLER = 187,
    tecIMMUTABLE = 188,
    tecTOO_MANY_REMARKS = 189,
+   tecSTART_AFTER_TOO_HIGH = 190,
    tecLAST_POSSIBLE_ENTRY = 255,
 };


@@ -371,7 +371,6 @@ LedgerFormats::LedgerFormats()
            ltCRON,
            {
                {sfOwner, soeREQUIRED},
-               {sfStartTime, soeREQUIRED},
                {sfDelaySeconds, soeREQUIRED},
                {sfRepeatCount, soeREQUIRED},
                {sfOwnerNode, soeREQUIRED},

@@ -157,7 +157,7 @@ CONSTRUCT_TYPED_SFIELD(sfLockCount, "LockCount", UINT32,

 CONSTRUCT_TYPED_SFIELD(sfFirstNFTokenSequence, "FirstNFTokenSequence", UINT32, 50);

-CONSTRUCT_TYPED_SFIELD(sfStartTime, "StartTime", UINT32, 93);
+CONSTRUCT_TYPED_SFIELD(sfStartAfter, "StartAfter", UINT32, 93);
 CONSTRUCT_TYPED_SFIELD(sfRepeatCount, "RepeatCount", UINT32, 94);
 CONSTRUCT_TYPED_SFIELD(sfDelaySeconds, "DelaySeconds", UINT32, 95);
 CONSTRUCT_TYPED_SFIELD(sfXahauActivationLgrSeq, "XahauActivationLgrSeq",UINT32, 96);

@@ -94,6 +94,7 @@ transResults()
    MAKE_ERROR(tecINSUF_RESERVE_SELLER, "The seller of an object has insufficient reserves, and thus cannot complete the sale."),
    MAKE_ERROR(tecIMMUTABLE, "The remark is marked immutable on the object, and therefore cannot be updated."),
    MAKE_ERROR(tecTOO_MANY_REMARKS, "The number of remarks on the object would exceed the limit of 32."),
+   MAKE_ERROR(tecSTART_AFTER_TOO_HIGH, "The proposed StartAfter time is greater than one year away."),
    MAKE_ERROR(tefALREADY, "The exact transaction was already in this ledger."),
    MAKE_ERROR(tefBAD_ADD_AUTH, "Not authorized to add account."),
    MAKE_ERROR(tefBAD_AUTH, "Transaction's public key is not authorized."),
@@ -486,7 +486,7 @@ TxFormats::TxFormats()
        {
            {sfDelaySeconds, soeOPTIONAL},
            {sfRepeatCount, soeOPTIONAL},
-           {sfStartTime, soeOPTIONAL},
+           {sfStartAfter, soeOPTIONAL},
        },
        commonFields);
 }
@@ -48,13 +48,9 @@ struct Cron_test : public beast::unit_test::suite
            auto const expectResult =
                withCron ? ter(tesSUCCESS) : ter(temDISABLED);

+           auto tx = cron::set(alice);
            // CLAIM
-           env(cron::set(alice),
-               cron::startTime(0),
-               cron::repeat(100),
-               cron::delay(100),
-               fee(XRP(1)),
-               expectResult);
+           env(cron::set(alice), cron::delay(100), fee(XRP(1)), expectResult);
            env.close();
        }
    }

@@ -74,10 +70,9 @@ struct Cron_test : public beast::unit_test::suite
        env.fund(XRP(1000), alice);
        env.close();

-       // create
+       // create with RepeatCount
        auto expected = baseFee * 2 + baseFee * 256;
        env(cron::set(alice),
-           cron::startTime(0),
            cron::delay(356 * 24 * 60 * 60),
            cron::repeat(256),
            fee(expected - 1),

@@ -85,13 +80,26 @@ struct Cron_test : public beast::unit_test::suite
        env.close();

        env(cron::set(alice),
-           cron::startTime(0),
            cron::delay(356 * 24 * 60 * 60),
            cron::repeat(256),
            fee(expected),
            ter(tesSUCCESS));
        env.close();

+       // create with no RepeatCount
+       expected = baseFee * 2;
+       env(cron::set(alice),
+           cron::delay(356 * 24 * 60 * 60),
+           fee(expected - 1),
+           ter(telINSUF_FEE_P));
+       env.close();
+
+       env(cron::set(alice),
+           cron::delay(356 * 24 * 60 * 60),
+           fee(expected),
+           ter(tesSUCCESS));
+       env.close();
+
        // delete
        expected = baseFee;
        env(cron::set(alice),

@@ -135,47 +143,30 @@ struct Cron_test : public beast::unit_test::suite

        // temMALFORMED
        {
-           // Invalid DelaySeconds and RepeatCount and StartTime are not
-           // specified
+           // Invalid both DelaySeconds and RepeatCount are not specified
            env(cron::set(alice), ter(temMALFORMED));

-           // Invalid DelaySeconds and RepeatCount combination with StartTime
-           env(cron::set(alice),
-               cron::startTime(100),
-               cron::delay(356 * 24 * 60 * 60),
-               ter(temMALFORMED));
-           env(cron::set(alice),
-               cron::startTime(100),
-               cron::repeat(256),
-               ter(temMALFORMED));
+           // Invalid DelaySeconds and RepeatCount combination
+           // (only RepeatCount specified)
+           env(cron::set(alice), cron::repeat(256), ter(temMALFORMED));

            // Invalid DelaySeconds
            env(cron::set(alice),
-               cron::startTime(100),
                cron::delay(365 * 24 * 60 * 60 + 1),
                cron::repeat(256),
                ter(temMALFORMED));

            // Invalid RepeatCount
            env(cron::set(alice),
-               cron::startTime(100),
                cron::delay(365 * 24 * 60 * 60),
                cron::repeat(257),
                ter(temMALFORMED));

-           // Invalid with tfCronUnset flag
+           // Invalid tfCronUnset flag
            env(cron::set(alice),
                cron::delay(365 * 24 * 60 * 60),
                txflags(tfCronUnset),
                ter(temMALFORMED));
-           env(cron::set(alice),
-               cron::repeat(100),
-               txflags(tfCronUnset),
-               ter(temMALFORMED));
-           env(cron::set(alice),
-               cron::startTime(100),
-               txflags(tfCronUnset),
-               ter(temMALFORMED));
        }
    }

@@ -188,25 +179,9 @@ struct Cron_test : public beast::unit_test::suite

        auto const alice = Account("alice");
        Env env{*this, features | featureCron};
-       env.fund(XRP(1000), alice);
-       env.close();

-       // Past StartTime
-       env(cron::set(alice),
-           cron::startTime(
-               env.timeKeeper().now().time_since_epoch().count() - 1),
-           fee(XRP(1)),
-           ter(tecEXPIRED));
-       env.close();
-
-       // Too far Future StartTime
-       env(cron::set(alice),
-           cron::startTime(
-               env.timeKeeper().now().time_since_epoch().count() +
-               365 * 24 * 60 * 60 + 1),
-           fee(XRP(1)),
-           ter(tecEXPIRED));
-       env.close();
+       // there is no check in preclaim
+       BEAST_EXPECT(true);
    }

    void

@@ -224,10 +199,7 @@ struct Cron_test : public beast::unit_test::suite
        auto const aliceOwnerCount = ownerCount(env, alice);

        // create cron
-       auto parentCloseTime =
-           env.current()->parentCloseTime().time_since_epoch().count();
        env(cron::set(alice),
-           cron::startTime(parentCloseTime + 356 * 24 * 60 * 60),
            cron::delay(356 * 24 * 60 * 60),
            cron::repeat(256),
            fee(XRP(1)),

@@ -247,15 +219,9 @@ struct Cron_test : public beast::unit_test::suite
        BEAST_EXPECT(
            cronSle->getFieldU32(sfDelaySeconds) == 356 * 24 * 60 * 60);
        BEAST_EXPECT(cronSle->getFieldU32(sfRepeatCount) == 256);
-       BEAST_EXPECT(
-           cronSle->getFieldU32(sfStartTime) ==
-           parentCloseTime + 356 * 24 * 60 * 60);

        // update cron
-       parentCloseTime =
-           env.current()->parentCloseTime().time_since_epoch().count();
        env(cron::set(alice),
-           cron::startTime(0),
            cron::delay(100),
            cron::repeat(10),
            fee(XRP(1)),

@@ -277,7 +243,6 @@ struct Cron_test : public beast::unit_test::suite
        BEAST_EXPECT(cronSle2);
        BEAST_EXPECT(cronSle2->getFieldU32(sfDelaySeconds) == 100);
        BEAST_EXPECT(cronSle2->getFieldU32(sfRepeatCount) == 10);
-       BEAST_EXPECT(cronSle2->getFieldU32(sfStartTime) == parentCloseTime);

        // delete cron
        env(cron::set(alice),

@@ -324,7 +289,6 @@ struct Cron_test : public beast::unit_test::suite
        auto repeatCount = 10;

        env(cron::set(alice),
-           cron::startTime(baseTime + 100),
            cron::delay(100),
            cron::repeat(repeatCount),
            fee(XRP(1)));

@@ -347,7 +311,7 @@ struct Cron_test : public beast::unit_test::suite
        }

        // close after 100 seconds passed
-       env.close(10s);
+       env.close();

        auto txns = env.closed()->txs;
        auto size = std::distance(txns.begin(), txns.end());

@@ -384,7 +348,8 @@ struct Cron_test : public beast::unit_test::suite
            cronSle->getAccountID(sfOwner) == alice.id());

            // set new base time
-           baseTime = baseTime + 100;
+           baseTime =
+               env.timeKeeper().now().time_since_epoch().count();
            lastCronKeylet = cronKeylet;
        }
        else

@@ -415,7 +380,7 @@ struct Cron_test : public beast::unit_test::suite

        for (auto const& account : accounts)
        {
-           env(cron::set(account), cron::startTime(0), fee(XRP(1)));
+           env(cron::set(account), cron::delay(0), fee(XRP(1)));
        }
        env.close();

@@ -6297,7 +6297,6 @@ private:

        // cron set
        env(cron::set(account),
-           cron::startTime(0),
            cron::delay(100),
            cron::repeat(1),
            fee(XRP(1)),

@@ -6333,11 +6332,8 @@ private:
        env.fund(XRP(1000), account);
        env.close();

-       auto const baseTime =
-           env.current()->parentCloseTime().time_since_epoch().count();
        // cron set
        env(cron::set(account),
-           cron::startTime(baseTime + 100),
            cron::delay(100),
            cron::repeat(1),
            fee(XRP(1)),
@@ -357,32 +357,6 @@ class UNLReport_test : public beast::unit_test::suite
        BEAST_EXPECT(isImportVL(env, ivlKeys[0]) == true);
        BEAST_EXPECT(isImportVL(env, ivlKeys[1]) == false);
        BEAST_EXPECT(isActiveValidator(env, vlKeys[0]) == true);
-
-       // now test unrecognised keys that are already present in the ledger
-       // object (flap fix)
-       l = std::make_shared<Ledger>(
-           *l, env.app().timeKeeper().closeTime());
-
-       // insert a ttUNL_REPORT pseudo into the open ledger
-       env.app().openLedger().modify(
-           [&](OpenView& view, beast::Journal j) -> bool {
-               STTx tx = createUNLRTx(l->seq(), ivlKeys[1], vlKeys[0]);
-               uint256 txID = tx.getTransactionID();
-               auto s = std::make_shared<ripple::Serializer>();
-               tx.add(*s);
-               env.app().getHashRouter().setFlags(txID, SF_PRIVATE2);
-               view.rawTxInsert(txID, std::move(s), nullptr);
-               return true;
-           });
-
-       BEAST_EXPECT(hasUNLReport(env) == true);
-
-       // close the ledger
-       env.close();
-
-       BEAST_EXPECT(isImportVL(env, ivlKeys[0]) == true);
-       BEAST_EXPECT(isImportVL(env, ivlKeys[1]) == false);
-       BEAST_EXPECT(isActiveValidator(env, vlKeys[0]) == true);
    }
 }

@@ -34,21 +34,6 @@ namespace cron {
 Json::Value
 set(jtx::Account const& account);

-/** Sets the optional StartTime on a JTx. */
-class startTime
-{
-private:
-    uint32_t startTime_;
-
-public:
-    explicit startTime(uint32_t startTime) : startTime_(startTime)
-    {
-    }
-
-    void
-    operator()(Env&, JTx& jtx) const;
-};
-
 /** Sets the optional DelaySeconds on a JTx. */
 class delay
 {

@@ -37,12 +37,6 @@ set(jtx::Account const& account)
    return jv;
 }

-void
-startTime::operator()(Env& env, JTx& jt) const
-{
-    jt.jv[sfStartTime.jsonName] = startTime_;
-}
-
 void
 delay::operator()(Env& env, JTx& jt) const
 {
@@ -1,357 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-    This file is part of rippled: https://github.com/ripple/rippled
-    Copyright (c) 2012, 2013 Ripple Labs Inc.
-
-    Permission to use, copy, modify, and/or distribute this software for any
-    purpose with or without fee is hereby granted, provided that the above
-    copyright notice and this permission notice appear in all copies.
-
-    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#include <test/nodestore/TestBase.h>
-#include <test/unit_test/SuiteJournal.h>
-
-#include <ripple/basics/BasicConfig.h>
-#include <ripple/basics/ByteUtilities.h>
-#include <ripple/beast/utility/temp_dir.h>
-#include <ripple/nodestore/DummyScheduler.h>
-#include <ripple/nodestore/Manager.h>
-
-#include <memory>
-#include <sstream>
-
-namespace ripple {
-namespace NodeStore {
-
-class NuDBFactory_test : public TestBase
-{
-private:
-    // Helper function to create a Section with specified parameters
-    Section
-    createSection(std::string const& path, std::string const& blockSize = "")
-    {
-        Section params;
-        params.set("type", "nudb");
-        params.set("path", path);
-        if (!blockSize.empty())
-            params.set("nudb_block_size", blockSize);
-        return params;
-    }
-
-    // Helper function to create a backend and test basic functionality
-    bool
-    testBackendFunctionality(
-        Section const& params,
-        std::size_t expectedBlocksize)
-    {
-        try
-        {
-            DummyScheduler scheduler;
-            test::SuiteJournal journal("NuDBFactory_test", *this);
-
-            auto backend = Manager::instance().make_Backend(
-                params, megabytes(4), scheduler, journal);
-
-            if (!BEAST_EXPECT(backend))
-                return false;
-
-            if (!BEAST_EXPECT(backend->getBlockSize() == expectedBlocksize))
-                return false;
-
-            backend->open();
-
-            if (!BEAST_EXPECT(backend->isOpen()))
-                return false;
-
-            // Test basic store/fetch functionality
-            auto batch = createPredictableBatch(10, 12345);
-            storeBatch(*backend, batch);
-
-            Batch copy;
-            fetchCopyOfBatch(*backend, &copy, batch);
-
-            backend->close();
-
-            return areBatchesEqual(batch, copy);
-        }
-        catch (...)
-        {
-            return false;
-        }
-    }
-
-    // Helper function to test log messages
-    void
-    testLogMessage(
-        Section const& params,
-        beast::severities::Severity level,
-        std::string const& expectedMessage)
-    {
-        test::StreamSink sink(level);
-        beast::Journal journal(sink);
-
-        DummyScheduler scheduler;
-        auto backend = Manager::instance().make_Backend(
-            params, megabytes(4), scheduler, journal);
-
-        std::string logOutput = sink.messages().str();
-        BEAST_EXPECT(logOutput.find(expectedMessage) != std::string::npos);
-    }
-
-public:
-    void
-    testDefaultBlockSize()
-    {
-        testcase("Default block size (no nudb_block_size specified)");
-
-        beast::temp_dir tempDir;
-        auto params = createSection(tempDir.path());
-
-        // Should work with default 4096 block size
-        BEAST_EXPECT(testBackendFunctionality(params, 4096));
-    }
-
-    void
-    testValidBlockSizes()
-    {
-        testcase("Valid block sizes");
-
-        std::vector<std::size_t> validSizes = {4096, 8192, 16384, 32768};
-
-        for (auto const& size : validSizes)
-        {
-            beast::temp_dir tempDir;
-            auto params = createSection(tempDir.path(), to_string(size));
-
-            BEAST_EXPECT(testBackendFunctionality(params, size));
-        }
-    }
-
-    void
-    testInvalidBlockSizes()
-    {
-        testcase("Invalid block sizes");
-
-        std::vector<std::string> invalidSizes = {
-            "2048",    // Too small
-            "1024",    // Too small
-            "65536",   // Too large
-            "131072",  // Too large
-            "5000",    // Not power of 2
-            "6000",    // Not power of 2
-            "10000",   // Not power of 2
-            "0",       // Zero
-            "-1",      // Negative
-            "abc",     // Non-numeric
-            "4k",      // Invalid format
-            "4096.5"   // Decimal
-        };
-
-        for (auto const& size : invalidSizes)
-        {
-            beast::temp_dir tempDir;
-            auto params = createSection(tempDir.path(), size);
-
-            DummyScheduler scheduler;
-            test::SuiteJournal journal("NuDBFactory_test", *this);
-
-            // Should throw exception for invalid sizes
-            try
-            {
-                auto backend = Manager::instance().make_Backend(
-                    params, megabytes(4), scheduler, journal);
-                // If we get here, the test failed
-                BEAST_EXPECT(false);
-            }
-            catch (std::exception const& e)
-            {
-                // Expected exception
-                std::string error{e.what()};
-                BEAST_EXPECT(
-                    error.find("Invalid nudb_block_size") != std::string::npos);
-            }
-        }
-    }
-
-    void
-    testLogMessages()
-    {
-        testcase("Log message verification");
-
-        // Test valid custom block size logging
-        {
-            beast::temp_dir tempDir;
-            auto params = createSection(tempDir.path(), "8192");
-
-            testLogMessage(
-                params,
-                beast::severities::kInfo,
-                "Using custom NuDB block size: 8192");
-        }
-
-        // Test invalid block size exception message
-        {
-            beast::temp_dir tempDir;
-            auto params = createSection(tempDir.path(), "5000");
-
-            test::StreamSink sink(beast::severities::kWarning);
-            beast::Journal journal(sink);
-
-            DummyScheduler scheduler;
-            try
-            {
-                auto backend = Manager::instance().make_Backend(
-                    params, megabytes(4), scheduler, journal);
-                fail();
-            }
-            catch (std::exception const& e)
-            {
-                std::string logOutput{e.what()};
-                BEAST_EXPECT(
-                    logOutput.find("Invalid nudb_block_size: 5000") !=
-                    std::string::npos);
-                BEAST_EXPECT(
-                    logOutput.find(
-                        "Must be power of 2 between 4096 and 32768") !=
-                    std::string::npos);
-            }
-        }
-
-        // Test non-numeric value exception message
-        {
-            beast::temp_dir tempDir;
-            auto params = createSection(tempDir.path(), "invalid");
-
-            test::StreamSink sink(beast::severities::kWarning);
-            beast::Journal journal(sink);
-
-            DummyScheduler scheduler;
-            try
-            {
-                auto backend = Manager::instance().make_Backend(
-                    params, megabytes(4), scheduler, journal);
-
-                fail();
-            }
-            catch (std::exception const& e)
-            {
-                std::string logOutput{e.what()};
-                BEAST_EXPECT(
-                    logOutput.find("Invalid nudb_block_size value: invalid") !=
-                    std::string::npos);
-            }
-        }
-    }
-
-    void
-    testPowerOfTwoValidation()
-    {
-        testcase("Power of 2 validation logic");
-
-        // Test edge cases around valid range
-        std::vector<std::pair<std::string, bool>> testCases = {
-            {"4095", false},   // Just below minimum
-            {"4096", true},    // Minimum valid
-            {"4097", false},   // Just above minimum, not power of 2
-            {"8192", true},    // Valid power of 2
-            {"8193", false},   // Just above valid power of 2
-            {"16384", true},   // Valid power of 2
-            {"32768", true},   // Maximum valid
-            {"32769", false},  // Just above maximum
-            {"65536", false}   // Power of 2 but too large
-        };
-
-        for (auto const& [size, shouldWork] : testCases)
-        {
-            beast::temp_dir tempDir;
-            auto params = createSection(tempDir.path(), size);
-
-            test::StreamSink sink(beast::severities::kWarning);
-            beast::Journal journal(sink);
-
-            DummyScheduler scheduler;
-            try
-            {
-                auto backend = Manager::instance().make_Backend(
-                    params, megabytes(4), scheduler, journal);
-                BEAST_EXPECT(shouldWork);
-            }
-            catch (std::exception const& e)
-            {
-                std::string logOutput{e.what()};
-                BEAST_EXPECT(
-                    logOutput.find("Invalid nudb_block_size") !=
-                    std::string::npos);
-                BEAST_EXPECT(!shouldWork);
-            }
-        }
-    }
-
-    void
-    testDataPersistence()
-    {
-        testcase("Data persistence with different block sizes");
-
-        std::vector<std::string> blockSizes = {
-            "4096", "8192", "16384", "32768"};
-
-        for (auto const& size : blockSizes)
-        {
-            beast::temp_dir tempDir;
-            auto params = createSection(tempDir.path(), size);
-
-            DummyScheduler scheduler;
-            test::SuiteJournal journal("NuDBFactory_test", *this);
-
-            // Create test data
-            auto batch = createPredictableBatch(50, 54321);
-
-            // Store data
-            {
-                auto backend = Manager::instance().make_Backend(
-                    params, megabytes(4), scheduler, journal);
-                backend->open();
-                storeBatch(*backend, batch);
-                backend->close();
-            }
-
-            // Retrieve data in new backend instance
-            {
-                auto backend = Manager::instance().make_Backend(
-                    params, megabytes(4), scheduler, journal);
-                backend->open();
-
-                Batch copy;
-                fetchCopyOfBatch(*backend, &copy, batch);
-
-                BEAST_EXPECT(areBatchesEqual(batch, copy));
-                backend->close();
-            }
-        }
-    }
-
-    void
-    run() override
-    {
-        testDefaultBlockSize();
-        testValidBlockSizes();
-        testInvalidBlockSizes();
-        testLogMessages();
-        testPowerOfTwoValidation();
-        testDataPersistence();
-    }
-};
-
-BEAST_DEFINE_TESTSUITE(NuDBFactory, ripple_core, ripple);
-
-} // namespace NodeStore
-} // namespace ripple