Mirror of https://github.com/XRPLF/rippled.git (synced 2025-12-02 00:45:58 +00:00)

Compare commits: ximinez/di ... ximinez/fi

8 commits
| Author | SHA1 | Date |
|---|---|---|
| | 06b4e84654 | |
| | 80579dca62 | |
| | 37c3133a91 | |
| | 5c31b55357 | |
| | cc26829d32 | |
| | ebb05195cf | |
| | bd7a9051db | |
| | f4d55c8b77 | |
@@ -14,6 +14,7 @@
// Add new amendments to the top of this list.
// Keep it sorted in reverse chronological order.

XRPL_FIX (BatchInnerSigs, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(PermissionDelegationV1_1, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (DirectoryLimit, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (IncludeKeyletFields, Supported::yes, VoteBehavior::DefaultNo)
@@ -30,7 +31,6 @@ XRPL_FEATURE(Batch, Supported::yes, VoteBehavior::DefaultNo
XRPL_FEATURE(SingleAssetVault, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (PayChanCancelAfter, Supported::yes, VoteBehavior::DefaultNo)
// Check flags in Credential transactions
XRPL_FEATURE(DefragDirectories, Supported::no, VoteBehavior::DefaultNo)
XRPL_FIX (InvalidTxFlags, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (FrozenLPTokenTransfer, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(DeepFreeze, Supported::yes, VoteBehavior::DefaultNo)
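The new fixBatchInnerSigs entry above is the amendment the rest of this changeset gates on. As a rough sketch of how such a gate is consulted (the rules.enabled(...) pattern also appears in the Transactor hunks further down; the surrounding scaffolding here is assumed, not taken from the diff):

```cpp
// Sketch only: `rules` stands for the Rules object reachable from a view or
// preflight context (view.rules(), ctx.rules); nothing here is new API.
if (rules.enabled(fixBatchInnerSigs))
{
    // Post-amendment behavior: recognize batch inner transactions by the
    // engine-supplied parent batch ID (see Transactor::checkSign below).
}
else
{
    // Pre-amendment behavior: fall back to the tfInnerBatchTxn flag.
}
```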
@@ -8,136 +8,6 @@

namespace ripple {

namespace directory {

struct Gap
{
    uint64_t const page;
    SLE::pointer node;
    uint64_t const nextPage;
    SLE::pointer next;
};

std::uint64_t
createRoot(
    ApplyView& view,
    Keylet const& directory,
    uint256 const& key,
    std::function<void(std::shared_ptr<SLE> const&)> const& describe)
{
    auto newRoot = std::make_shared<SLE>(directory);
    newRoot->setFieldH256(sfRootIndex, directory.key);
    describe(newRoot);

    STVector256 v;
    v.push_back(key);
    newRoot->setFieldV256(sfIndexes, v);

    view.insert(newRoot);
    return std::uint64_t{0};
}

auto
findPreviousPage(ApplyView& view, Keylet const& directory, SLE::ref start)
{
    std::uint64_t page = start->getFieldU64(sfIndexPrevious);

    auto node = start;

    if (page)
    {
        node = view.peek(keylet::page(directory, page));
        if (!node)
            LogicError("Directory chain: root back-pointer broken.");
    }

    auto indexes = node->getFieldV256(sfIndexes);
    return std::make_tuple(page, node, indexes);
}

std::uint64_t
insertKey(
    ApplyView& view,
    SLE::ref node,
    std::uint64_t page,
    bool preserveOrder,
    STVector256& indexes,
    uint256 const& key)
{
    if (preserveOrder)
    {
        if (std::find(indexes.begin(), indexes.end(), key) != indexes.end())
            LogicError("dirInsert: double insertion");

        indexes.push_back(key);
    }
    else
    {
        // We can't be sure if this page is already sorted because
        // it may be a legacy page we haven't yet touched. Take
        // the time to sort it.
        std::sort(indexes.begin(), indexes.end());

        auto pos = std::lower_bound(indexes.begin(), indexes.end(), key);

        if (pos != indexes.end() && key == *pos)
            LogicError("dirInsert: double insertion");

        indexes.insert(pos, key);
    }

    node->setFieldV256(sfIndexes, indexes);
    view.update(node);
    return page;
}

std::optional<std::uint64_t>
insertPage(
    ApplyView& view,
    std::uint64_t page,
    SLE::pointer node,
    std::uint64_t nextPage,
    SLE::ref next,
    uint256 const& key,
    Keylet const& directory,
    std::function<void(std::shared_ptr<SLE> const&)> const& describe)
{
    // Check whether we're out of pages.
    if (++page >= dirNodeMaxPages)
    {
        return std::nullopt;
    }

    // We are about to create a new node; we'll link it to
    // the chain first:
    node->setFieldU64(sfIndexNext, page);
    view.update(node);

    next->setFieldU64(sfIndexPrevious, page);
    view.update(next);

    // Insert the new key:
    STVector256 indexes;
    indexes.push_back(key);

    node = std::make_shared<SLE>(keylet::page(directory, page));
    node->setFieldH256(sfRootIndex, directory.key);
    node->setFieldV256(sfIndexes, indexes);

    // Save some space by not specifying the value 0 since
    // it's the default.
    if (page != 1)
        node->setFieldU64(sfIndexPrevious, page - 1);
    if (nextPage)
        node->setFieldU64(sfIndexNext, nextPage);
    describe(node);
    view.insert(node);

    return page;
}

} // namespace directory

std::optional<std::uint64_t>
ApplyView::dirAdd(
    bool preserveOrder,
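The unsorted branch of directory::insertKey above (repeated inside dirAdd in the next hunk) sorts a possibly unsorted legacy page and then performs an ordered, duplicate-checked insert. A self-contained illustration of that pattern with plain integers, so it can be compiled and run outside rippled (std::vector<int> stands in for STVector256):

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

// Insert `key` into `page` keeping it sorted, rejecting duplicates.
// Mirrors the sort + lower_bound + insert sequence used by insertKey.
bool sortedInsert(std::vector<int>& page, int key)
{
    // A legacy page may not be sorted yet; sort it first.
    std::sort(page.begin(), page.end());

    auto pos = std::lower_bound(page.begin(), page.end(), key);
    if (pos != page.end() && *pos == key)
        return false;  // double insertion

    page.insert(pos, key);
    return true;
}

int main()
{
    std::vector<int> page{30, 10, 20};  // unsorted "legacy" page
    assert(sortedInsert(page, 25));
    assert(!sortedInsert(page, 25));    // duplicate rejected
    assert((page == std::vector<int>{10, 20, 25, 30}));
}
```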
@@ -145,15 +15,66 @@ ApplyView::dirAdd(
    uint256 const& key,
    std::function<void(std::shared_ptr<SLE> const&)> const& describe)
{
    auto const root = peek(directory);
    auto root = peek(directory);

    if (!root)
    {
        // No root, make it.
        return directory::createRoot(*this, directory, key, describe);
        root = std::make_shared<SLE>(directory);
        root->setFieldH256(sfRootIndex, directory.key);
        describe(root);

        STVector256 v;
        v.push_back(key);
        root->setFieldV256(sfIndexes, v);

        insert(root);
        return std::uint64_t{0};
    }

    std::uint64_t page = root->getFieldU64(sfIndexPrevious);

    auto node = root;

    if (page)
    {
        node = peek(keylet::page(directory, page));
        if (!node)
            LogicError("Directory chain: root back-pointer broken.");
    }

    auto indexes = node->getFieldV256(sfIndexes);

    // If there's space, we use it:
    if (indexes.size() < dirNodeMaxEntries)
    {
        if (preserveOrder)
        {
            if (std::find(indexes.begin(), indexes.end(), key) != indexes.end())
                LogicError("dirInsert: double insertion");

            indexes.push_back(key);
        }
        else
        {
            // We can't be sure if this page is already sorted because
            // it may be a legacy page we haven't yet touched. Take
            // the time to sort it.
            std::sort(indexes.begin(), indexes.end());

            auto pos = std::lower_bound(indexes.begin(), indexes.end(), key);

            if (pos != indexes.end() && key == *pos)
                LogicError("dirInsert: double insertion");

            indexes.insert(pos, key);
        }

        node->setFieldV256(sfIndexes, indexes);
        update(node);
        return page;
    }

    //--Conflict start
    // We rely on modulo arithmetic of unsigned integers (guaranteed in
    // [basic.fundamental] paragraph 2) to detect page representation overflow.
    // For signed integers this would be UB, hence static_assert here.
@@ -171,61 +92,30 @@ ApplyView::dirAdd(
        page >= dirNodeMaxPages) // Old pages limit
        return std::nullopt;

    // The block above and below this comment conflicted, and I don't
    // feel like resolving it right now, so I'm saving it for later.
    // Because page is defined twice, it won't build.
    // We are about to create a new node; we'll link it to
    // the chain first:
    node->setFieldU64(sfIndexNext, page);
    update(node);

    auto [page, node, indexes] =
        directory::findPreviousPage(*this, directory, root);
    //--Conflict end
    root->setFieldU64(sfIndexPrevious, page);
    update(root);

    if (rules().enabled(featureDefragDirectories))
    {
        // If there are more nodes than just the root, and there's no space in
        // the last one, walk backwards to find one with space, or to find one
        // missing.
        std::optional<directory::Gap> gapPages;
        while (page && indexes.size() >= dirNodeMaxEntries)
        {
            // Find a page with space, or a gap in pages.
            auto [prevPage, prevNode, prevIndexes] =
                directory::findPreviousPage(*this, directory, node);
            if (!gapPages && prevPage != page - 1)
                gapPages.emplace(prevPage, prevNode, page, node);
            page = prevPage;
            node = prevNode;
            indexes = prevIndexes;
        }
        // We looped through all the pages back to the root.
        if (!page)
        {
            // If we found a gap, use it.
            if (gapPages)
            {
                return directory::insertPage(
                    *this,
                    gapPages->page,
                    gapPages->node,
                    gapPages->nextPage,
                    gapPages->next,
                    key,
                    directory,
                    describe);
            }
            std::tie(page, node, indexes) =
                directory::findPreviousPage(*this, directory, root);
        }
    }
    // Insert the new key:
    indexes.clear();
    indexes.push_back(key);

    // If there's space, we use it:
    if (indexes.size() < dirNodeMaxEntries)
    {
        return directory::insertKey(
            *this, node, page, preserveOrder, indexes, key);
    }
    node = std::make_shared<SLE>(keylet::page(directory, page));
    node->setFieldH256(sfRootIndex, directory.key);
    node->setFieldV256(sfIndexes, indexes);

    return directory::insertPage(
        *this, page, node, 0, root, key, directory, describe);
    // Save some space by not specifying the value 0 since
    // it's the default.
    if (page != 1)
        node->setFieldU64(sfIndexPrevious, page - 1);
    describe(node);
    insert(node);

    return page;
}

bool

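The overflow comment inside the conflicted region above relies on a language guarantee: arithmetic on unsigned integers wraps modulo 2^N, so wraparound of the page counter can be detected portably, while the equivalent signed overflow would be undefined behavior, hence the static_assert. A small standalone illustration using only std::uint64_t (not code from the changeset):

```cpp
#include <cstdint>
#include <type_traits>

// Wraparound of unsigned integers is well defined ([basic.fundamental]),
// which is what makes this check valid; for a signed type the overflow
// itself would already be undefined behavior.
static_assert(
    std::is_unsigned_v<std::uint64_t>,
    "overflow detection relies on unsigned modulo arithmetic");

// Increment an unsigned page counter and report whether it wrapped.
bool incrementDetectingWrap(std::uint64_t& page)
{
    std::uint64_t const next = page + 1;  // wraps to 0 at the maximum value
    bool const wrapped = next < page;     // only possible via wraparound
    page = next;
    return wrapped;
}
```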
@@ -2295,9 +2295,12 @@ class Batch_test : public beast::unit_test::suite
        Serializer s;
        parsed.object->add(s);
        auto const jrr = env.rpc("submit", strHex(s.slice()))[jss::result];
        BEAST_EXPECT(
            jrr[jss::status] == "success" &&
            jrr[jss::engine_result] == "temINVALID_FLAG");
        BEAST_EXPECTS(
            jrr[jss::status] == "error" &&
                jrr[jss::error] == "invalidTransaction" &&
                jrr[jss::error_exception] ==
                    "fails local checks: Empty SigningPubKey.",
            to_string(jrr));

        env.close();
    }
@@ -4151,6 +4154,7 @@ public:
        using namespace test::jtx;
        auto const sa = testable_amendments();
        testWithFeats(sa);
        testWithFeats(sa - fixBatchInnerSigs);
    }
};

@@ -49,8 +49,7 @@ public:
    inner(
        Json::Value const& txn,
        std::uint32_t const& sequence,
        std::optional<std::uint32_t> const& ticket = std::nullopt,
        std::optional<std::uint32_t> const& fee = std::nullopt)
        std::optional<std::uint32_t> const& ticket = std::nullopt)
        : txn_(txn), seq_(sequence), ticket_(ticket)
    {
        txn_[jss::SigningPubKey] = "";

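The hunk above removes the unused fee parameter from the test-only inner wrapper; its constructor keeps the transaction JSON, a sequence, and an optional ticket, and clears SigningPubKey. A hypothetical usage sketch based only on that signature; env, alice, bob, the jtx pay helper, and the batch namespace are assumed to be in scope and are not taken from the diff:

```cpp
// Hypothetical usage, not taken from the diff: build an inner batch entry
// from a payment and the sending account's next sequence. The optional
// third argument would supply a ticket sequence instead.
auto const aliceSeq = env.seq(alice);
auto const innerPay = batch::inner(pay(alice, bob, XRP(10)), aliceSeq + 1);
```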
@@ -106,7 +106,10 @@ OpenLedger::accept(

        // skip batch txns
        // LCOV_EXCL_START
        if (tx->isFlag(tfInnerBatchTxn) && rules.enabled(featureBatch))
        // The flag should only be settable if Batch feature is enabled. If
        // Batch is not enabled, the flag is always invalid, so don't relay it
        // regardless.
        if (tx->isFlag(tfInnerBatchTxn))
        {
            XRPL_ASSERT(
                txpair.second && txpair.second->isFieldPresent(sfParentBatchID),

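The comment added in this hunk states the reasoning reused in the NetworkOPs, pubProposedTransaction, and PeerImp hunks below: tfInnerBatchTxn is only legitimately settable while the Batch amendment is active, and without Batch a flagged transaction is invalid anyway, so checking featureBatch before skipping it is redundant. Condensed into a tiny hypothetical predicate (not rippled API):

```cpp
// Hypothetical summary of the change: should a transaction be skipped
// (not relayed, not published) because it carries the inner-batch flag?
bool skipInnerBatch(bool hasInnerBatchFlag)
{
    // Old form: hasInnerBatchFlag && batchEnabled.
    // New form: the flag alone decides; with Batch disabled the flagged
    // transaction is invalid and gets rejected elsewhere regardless.
    return hasInnerBatchFlag;
}
```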
@@ -1677,10 +1677,11 @@ NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
                app_.getHashRouter().shouldRelay(e.transaction->getID());
            if (auto const sttx = *(e.transaction->getSTransaction());
                toSkip &&
                // Skip relaying if it's an inner batch txn and batch
                // feature is enabled
                !(sttx.isFlag(tfInnerBatchTxn) &&
                  newOL->rules().enabled(featureBatch)))
                // Skip relaying if it's an inner batch txn. The flag should
                // only be set if the Batch feature is enabled. If Batch is
                // not enabled, the flag is always invalid, so don't relay
                // it regardless.
                !(sttx.isFlag(tfInnerBatchTxn)))
            {
                protocol::TMTransaction tx;
                Serializer s;
@@ -3040,9 +3041,11 @@ NetworkOPsImp::pubProposedTransaction(
    std::shared_ptr<STTx const> const& transaction,
    TER result)
{
    // never publish an inner txn inside a batch txn
    if (transaction->isFlag(tfInnerBatchTxn) &&
        ledger->rules().enabled(featureBatch))
    // never publish an inner txn inside a batch txn. The flag should
    // only be set if the Batch feature is enabled. If Batch is not
    // enabled, the flag is always invalid, so don't publish it
    // regardless.
    if (transaction->isFlag(tfInnerBatchTxn))
        return;

    MultiApiJson jvObj =

@@ -204,12 +204,17 @@ Transactor::preflight2(PreflightContext const& ctx)
        // regardless of success or failure
        return *ret;

    auto const sigValid = checkValidity(
        ctx.app.getHashRouter(), ctx.tx, ctx.rules, ctx.app.config());
    if (sigValid.first == Validity::SigBad)
    // Skip signature check on batch inner transactions
    if (!ctx.tx.isFlag(tfInnerBatchTxn) || !ctx.rules.enabled(featureBatch))
    {
        JLOG(ctx.j.debug()) << "preflight2: bad signature. " << sigValid.second;
        return temINVALID; // LCOV_EXCL_LINE
        auto const sigValid = checkValidity(
            ctx.app.getHashRouter(), ctx.tx, ctx.rules, ctx.app.config());
        if (sigValid.first == Validity::SigBad)
        {
            JLOG(ctx.j.debug())
                << "preflight2: bad signature. " << sigValid.second;
            return temINVALID; // LCOV_EXCL_LINE
        }
    }
    return tesSUCCESS;
}
@@ -642,13 +647,17 @@ NotTEC
Transactor::checkSign(
    ReadView const& view,
    ApplyFlags flags,
    std::optional<uint256 const> const& parentBatchId,
    AccountID const& idAccount,
    STObject const& sigObject,
    beast::Journal const j)
{
    auto const pkSigner = sigObject.getFieldVL(sfSigningPubKey);
    // Ignore signature check on batch inner transactions
    if (sigObject.isFlag(tfInnerBatchTxn) && view.rules().enabled(featureBatch))
    bool const useCtx = view.rules().enabled(fixBatchInnerSigs);
    if ((useCtx ? parentBatchId.has_value()
                : sigObject.isFlag(tfInnerBatchTxn)) &&
        view.rules().enabled(featureBatch))
    {
        // Defensive Check: These values are also checked in Batch::preflight
        if (sigObject.isFieldPresent(sfTxnSignature) || !pkSigner.empty() ||
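The hunk above carries the core of fixBatchInnerSigs: once the fix is active, an inner batch transaction is recognized by the engine-supplied parentBatchId rather than by the submitter-controlled tfInnerBatchTxn flag. A hypothetical condensation of that selection in standard C++ (names are illustrative, not rippled API):

```cpp
#include <cstdint>
#include <optional>

// Illustrative only: decide whether to skip the signature check for an
// inner batch transaction. Pre-fix, the decision trusts a flag that the
// submitter controls; post-fix, it trusts the parent batch ID that only
// the engine supplies while applying an outer Batch transaction.
bool skipSignatureCheck(
    bool fixBatchInnerSigsEnabled,
    bool featureBatchEnabled,
    bool hasInnerBatchFlag,
    std::optional<std::uint64_t> const& parentBatchId)
{
    bool const isInner = fixBatchInnerSigsEnabled ? parentBatchId.has_value()
                                                  : hasInnerBatchFlag;
    return isInner && featureBatchEnabled;
}
```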
@@ -701,7 +710,8 @@ Transactor::checkSign(PreclaimContext const& ctx)
    auto const idAccount = ctx.tx.isFieldPresent(sfDelegate)
        ? ctx.tx.getAccountID(sfDelegate)
        : ctx.tx.getAccountID(sfAccount);
    return checkSign(ctx.view, ctx.flags, idAccount, ctx.tx, ctx.j);
    return checkSign(
        ctx.view, ctx.flags, ctx.parentBatchId, idAccount, ctx.tx, ctx.j);
}

NotTEC

@@ -266,6 +266,7 @@ protected:
    checkSign(
        ReadView const& view,
        ApplyFlags flags,
        std::optional<uint256 const> const& parentBatchId,
        AccountID const& idAccount,
        STObject const& sigObject,
        beast::Journal const j);

@@ -41,15 +41,22 @@ checkValidity(
            Validity::SigBad,
            "Malformed: Invalid inner batch transaction."};

        std::string reason;
        if (!passesLocalChecks(tx, reason))
        // This block should probably have never been included in the
        // original `Batch` implementation. An inner transaction never
        // has a valid signature.
        bool const neverValid = rules.enabled(fixBatchInnerSigs);
        if (!neverValid)
        {
            router.setFlags(id, SF_LOCALBAD);
            return {Validity::SigGoodOnly, reason};
        }
            std::string reason;
            if (!passesLocalChecks(tx, reason))
            {
                router.setFlags(id, SF_LOCALBAD);
                return {Validity::SigGoodOnly, reason};
            }

        router.setFlags(id, SF_SIGGOOD);
        return {Validity::Valid, ""};
            router.setFlags(id, SF_SIGGOOD);
            return {Validity::Valid, ""};
        }
    }

    if (any(flags & SF_SIGBAD))

@@ -1365,8 +1365,7 @@ PeerImp::handleTransaction(

    // Charge strongly for attempting to relay a txn with tfInnerBatchTxn
    // LCOV_EXCL_START
    if (stx->isFlag(tfInnerBatchTxn) &&
        getCurrentTransactionRules()->enabled(featureBatch))
    if (stx->isFlag(tfInnerBatchTxn))
    {
        JLOG(p_journal_.warn()) << "Ignoring Network relayed Tx containing "
                                   "tfInnerBatchTxn (handleTransaction).";
@@ -2924,8 +2923,7 @@ PeerImp::checkTransaction(
{
    // charge strongly for relaying batch txns
    // LCOV_EXCL_START
    if (stx->isFlag(tfInnerBatchTxn) &&
        getCurrentTransactionRules()->enabled(featureBatch))
    if (stx->isFlag(tfInnerBatchTxn))
    {
        JLOG(p_journal_.warn()) << "Ignoring Network relayed Tx containing "
                                   "tfInnerBatchTxn (checkSignature).";