Mirror of https://github.com/XRPLF/rippled.git, synced 2025-11-19 02:25:52 +00:00
Address rare corruption of NFTokenPage linked list (#4945)
* Add fixNFTokenPageLinks amendment: It was discovered that under rare circumstances the links between NFTokenPages could be removed. If this happens, the account_objects and account_nfts RPC commands under-report the NFTokens owned by an account. The fixNFTokenPageLinks amendment does the following to address the problem:
  - It fixes the underlying problem so no further broken links should be created.
  - It adds invariants so that, if such damage were introduced in the future, an invariant would stop it.
  - It adds a new LedgerStateFix transaction that repairs directories damaged in this fashion.
  - It adds unit tests for all of it.
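For orientation before the diff: the repair is driven by the new LedgerStateFix transaction. Below is a minimal sketch of submitting it, assembled from the jtx helpers this commit adds in its tests; the wrapper function is illustrative only, while the helper name ledgerStateFix::nftPageLinks and the owner-reserve-increment fee come directly from the tests further down.

    // Any funded account may submit the repair for any owner's directory.
    void
    repairNFTokenPageLinks(
        test::jtx::Env& env,
        test::jtx::Account const& submitter,
        test::jtx::Account const& owner)
    {
        using namespace test::jtx;
        // The transaction demands an elevated fee: one owner-reserve increment.
        auto const linkFixFee = drops(env.current()->fees().increment);
        env(ledgerStateFix::nftPageLinks(submitter, owner), fee(linkFixFee));
        env.close();
    }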
@@ -80,7 +80,7 @@ namespace detail {
 // Feature.cpp. Because it's only used to reserve storage, and determine how
 // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than
 // the actual number of amendments. A LogicError on startup will verify this.
-static constexpr std::size_t numFeatures = 78;
+static constexpr std::size_t numFeatures = 79;

 /** Amendments that this server supports and the default voting behavior.
     Whether they are enabled depends on the Rules defined in the validated
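The comment above pins down the contract on numFeatures: it only reserves storage, so it may exceed the number of registered amendments but must never fall short. An illustrative sketch of the kind of startup check it describes (shape and names are assumptions, not rippled's actual code):

    #include <cstddef>
    #include <stdexcept>

    constexpr std::size_t numFeatures = 79;

    void
    verifyFeatureReserve(std::size_t registeredAmendments)
    {
        // MAY be larger than the registration count, MUST NOT be smaller.
        if (registeredAmendments > numFeatures)
            throw std::logic_error("numFeatures is below the registered amendment count");
    }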
@@ -371,6 +371,7 @@ extern uint256 const fixReducedOffersV2;
 extern uint256 const fixEnforceNFTokenTrustline;
 extern uint256 const fixInnerObjTemplate2;
 extern uint256 const featureInvariantsV1_1;
+extern uint256 const fixNFTokenPageLinks;

 }  // namespace ripple

@@ -388,6 +388,7 @@ extern SF_UINT16 const sfHookEmitCount;
 extern SF_UINT16 const sfHookExecutionIndex;
 extern SF_UINT16 const sfHookApiVersion;
 extern SF_UINT16 const sfDiscountedFee;
+extern SF_UINT16 const sfLedgerFixType;

 // 32-bit integers (common)
 extern SF_UINT32 const sfNetworkID;

@@ -182,6 +182,7 @@ enum TEFcodes : TERUnderlyingType {
     tefTOO_BIG,
     tefNO_TICKET,
     tefNFTOKEN_IS_NOT_TRANSFERABLE,
+    tefINVALID_LEDGER_FIX_TYPE,
 };

 //------------------------------------------------------------------------------
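The new tefINVALID_LEDGER_FIX_TYPE result rejects a LedgerStateFix transaction whose LedgerFixType selects no known repair. A hedged sketch of that gate; the enum name and the value 1 for the page-links repair are assumptions for illustration (the tests below only confirm that values 0 and 200 are rejected):

    enum class LedgerFixType : std::uint16_t { nfTokenPageLinks = 1 };

    TER
    checkLedgerFixType(std::uint16_t type)
    {
        // Anything but a recognized repair selector fails with the new code.
        if (type != static_cast<std::uint16_t>(LedgerFixType::nfTokenPageLinks))
            return tefINVALID_LEDGER_FIX_TYPE;
        return tesSUCCESS;
    }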
@@ -190,13 +190,16 @@ enum TxType : std::uint16_t
     /** This transaction type deletes a DID */
     ttDID_DELETE = 50,

     /** This transaction type creates an Oracle instance */
     ttORACLE_SET = 51,

     /** This transaction type deletes an Oracle instance */
     ttORACLE_DELETE = 52,

+    /** This transaction type fixes a problem in the ledger state */
+    ttLEDGER_STATE_FIX = 53,
+
     /** This system-generated transaction type is used to update the status of the various amendments.

         For details, see: https://xrpl.org/amendments.html

@@ -104,6 +104,7 @@ JSS(NFTokenAcceptOffer); // transaction type.
 JSS(NFTokenCancelOffer);  // transaction type.
 JSS(NFTokenCreateOffer);  // transaction type.
 JSS(NFTokenPage);         // ledger type.
+JSS(LedgerStateFix);      // transaction type.
 JSS(LPTokenOut);          // in: AMM Liquidity Provider deposit tokens
 JSS(LPTokenIn);           // in: AMM Liquidity Provider withdraw tokens
 JSS(LPToken);             // out: AMM Liquidity Provider tokens info

@@ -497,6 +497,7 @@ REGISTER_FEATURE(NFTokenMintOffer, Supported::yes, VoteBehavior::De
 REGISTER_FIX    (fixReducedOffersV2,         Supported::yes, VoteBehavior::DefaultNo);
 REGISTER_FIX    (fixEnforceNFTokenTrustline, Supported::yes, VoteBehavior::DefaultNo);
 REGISTER_FIX    (fixInnerObjTemplate2,       Supported::yes, VoteBehavior::DefaultNo);
+REGISTER_FIX    (fixNFTokenPageLinks,        Supported::yes, VoteBehavior::DefaultNo);
 // InvariantsV1_1 will be changed to Supported::yes when all the
 // invariants expected to be included under it are complete.
 REGISTER_FEATURE(InvariantsV1_1, Supported::no, VoteBehavior::DefaultNo);

@@ -113,6 +113,7 @@ CONSTRUCT_TYPED_SFIELD(sfHookStateChangeCount, "HookStateChangeCount", UINT16,
 CONSTRUCT_TYPED_SFIELD(sfHookEmitCount,      "HookEmitCount",      UINT16, 18);
 CONSTRUCT_TYPED_SFIELD(sfHookExecutionIndex, "HookExecutionIndex", UINT16, 19);
 CONSTRUCT_TYPED_SFIELD(sfHookApiVersion,     "HookApiVersion",     UINT16, 20);
+CONSTRUCT_TYPED_SFIELD(sfLedgerFixType,      "LedgerFixType",      UINT16, 21);

 // 32-bit integers (common)
 CONSTRUCT_TYPED_SFIELD(sfNetworkID,          "NetworkID",          UINT32, 1);

@@ -137,6 +137,7 @@ transResults()
 MAKE_ERROR(tefTOO_BIG,                     "Transaction affects too many items."),
 MAKE_ERROR(tefNO_TICKET,                   "Ticket is not in ledger."),
 MAKE_ERROR(tefNFTOKEN_IS_NOT_TRANSFERABLE, "The specified NFToken is not transferable."),
+MAKE_ERROR(tefINVALID_LEDGER_FIX_TYPE,     "The LedgerFixType field has an invalid value."),

 MAKE_ERROR(telLOCAL_ERROR,  "Local failure."),
 MAKE_ERROR(telBAD_DOMAIN,   "Domain too long."),

@@ -505,6 +505,14 @@ TxFormats::TxFormats()
             {sfOracleDocumentID, soeREQUIRED},
         },
         commonFields);

+    add(jss::LedgerStateFix,
+        ttLEDGER_STATE_FIX,
+        {
+            {sfLedgerFixType, soeREQUIRED},
+            {sfOwner, soeOPTIONAL},
+        },
+        commonFields);
 }

 TxFormats const&
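Given that format, building the transaction by hand looks roughly like this (a sketch using the field names registered above; submitter and owner are placeholder jtx accounts, and the LedgerFixType value 1 is an assumed selector for the page-links repair):

    Json::Value jv;
    jv[jss::TransactionType] = jss::LedgerStateFix;
    jv[jss::Account] = submitter.human();
    jv[sfLedgerFixType.jsonName] = 1;      // required; assumed page-links selector
    jv[sfOwner.jsonName] = owner.human();  // optional; the directory to repair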
676	src/test/app/FixNFTokenPageLinks_test.cpp	Normal file
@@ -0,0 +1,676 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2024 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <test/jtx.h>
#include <xrpld/app/tx/detail/ApplyContext.h>
#include <xrpld/app/tx/detail/NFTokenUtils.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/jss.h>

namespace ripple {

class FixNFTokenPageLinks_test : public beast::unit_test::suite
{
    // Helper function that returns the owner count of an account root.
    static std::uint32_t
    ownerCount(test::jtx::Env const& env, test::jtx::Account const& acct)
    {
        std::uint32_t ret{0};
        if (auto const sleAcct = env.le(acct))
            ret = sleAcct->at(sfOwnerCount);
        return ret;
    }

    // Helper function that returns the number of NFTs owned by an account.
    static std::uint32_t
    nftCount(test::jtx::Env& env, test::jtx::Account const& acct)
    {
        Json::Value params;
        params[jss::account] = acct.human();
        params[jss::type] = "state";
        Json::Value nfts = env.rpc("json", "account_nfts", to_string(params));
        return nfts[jss::result][jss::account_nfts].size();
    };

    // A helper function that generates 96 NFTs packed into three pages
    // of 32 each. Returns a sorted vector of the NFTokenIDs packed into
    // the pages.
    std::vector<uint256>
    genPackedTokens(test::jtx::Env& env, test::jtx::Account const& owner)
    {
        using namespace test::jtx;

        std::vector<uint256> nfts;
        nfts.reserve(96);

        // We want to create fully packed NFT pages. This is a little
        // tricky since the system currently in place is inclined to
        // assign consecutive tokens to only 16 entries per page.
        //
        // By manipulating the internal form of the taxon we can force
        // creation of NFT pages that are completely full. This lambda
        // tells us the taxon value we should pass in in order for the
        // internal representation to match the passed in value.
        auto internalTaxon = [this, &env](
                                 Account const& acct,
                                 std::uint32_t taxon) -> std::uint32_t {
            std::uint32_t tokenSeq = [this, &env, &acct]() {
                auto const le = env.le(acct);
                if (BEAST_EXPECT(le))
                    return le->at(~sfMintedNFTokens).value_or(0u);
                return 0u;
            }();

            // If the fixNFTokenRemint amendment is on, we must
            // add FirstNFTokenSequence.
            if (env.current()->rules().enabled(fixNFTokenRemint))
                tokenSeq += env.le(acct)
                                ->at(~sfFirstNFTokenSequence)
                                .value_or(env.seq(acct));

            return toUInt32(nft::cipheredTaxon(tokenSeq, nft::toTaxon(taxon)));
        };

        for (std::uint32_t i = 0; i < 96; ++i)
        {
            // In order to fill the pages we use the taxon to break them
            // into groups of 16 entries. By having the internal
            // representation of the taxon go...
            //   0, 3, 2, 5, 4, 7...
            // in sets of 16 NFTs we can get each page to be fully
            // populated.
            std::uint32_t const intTaxon = (i / 16) + (i & 0b10000 ? 2 : 0);
            uint32_t const extTaxon = internalTaxon(owner, intTaxon);
            nfts.push_back(
                token::getNextID(env, owner, extTaxon, tfTransferable));
            env(token::mint(owner, extTaxon), txflags(tfTransferable));
            env.close();
        }

        // Sort the NFTs so they are listed in storage order, not
        // creation order.
        std::sort(nfts.begin(), nfts.end());

        // Verify that the owner does indeed have exactly three pages
        // of NFTs with 32 entries in each page.
        {
            Json::Value params;
            params[jss::account] = owner.human();
            auto resp = env.rpc("json", "account_objects", to_string(params));

            Json::Value const& acctObjs =
                resp[jss::result][jss::account_objects];

            int pageCount = 0;
            for (Json::UInt i = 0; i < acctObjs.size(); ++i)
            {
                if (BEAST_EXPECT(
                        acctObjs[i].isMember(sfNFTokens.jsonName) &&
                        acctObjs[i][sfNFTokens.jsonName].isArray()))
                {
                    BEAST_EXPECT(acctObjs[i][sfNFTokens.jsonName].size() == 32);
                    ++pageCount;
                }
            }
            // If this check fails then the internal NFT directory logic
            // has changed.
            BEAST_EXPECT(pageCount == 3);
        }
        return nfts;
    };

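    // Illustrative aside on the intTaxon formula above (not part of the
    // committed file): per group of 16 mints it evaluates to
    //   i in [ 0,16): 0 + 0 = 0      i in [48,64): 3 + 2 = 5
    //   i in [16,32): 1 + 2 = 3      i in [64,80): 4 + 0 = 4
    //   i in [32,48): 2 + 0 = 2      i in [80,96): 5 + 2 = 7
    // which is the 0, 3, 2, 5, 4, 7... sequence that fills each
    // NFTokenPage with 32 consecutive entries.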
    void
    testLedgerStateFixErrors()
    {
        testcase("LedgerStateFix error cases");

        using namespace test::jtx;

        Account const alice("alice");

        {
            // Verify that the LedgerStateFix transaction is disabled
            // without the fixNFTokenPageLinks amendment.
            Env env{*this, supported_amendments() - fixNFTokenPageLinks};
            env.fund(XRP(1000), alice);

            auto const linkFixFee = drops(env.current()->fees().increment);
            env(ledgerStateFix::nftPageLinks(alice, alice),
                fee(linkFixFee),
                ter(temDISABLED));
        }

        Env env{*this, supported_amendments()};
        env.fund(XRP(1000), alice);
        std::uint32_t const ticketSeq = env.seq(alice);
        env(ticket::create(alice, 1));

        // Preflight

        {
            // Fail preflight1. Can't combine AccountTxnID and ticket.
            Json::Value tx = ledgerStateFix::nftPageLinks(alice, alice);
            tx[sfAccountTxnID.jsonName] =
                "00000000000000000000000000000000"
                "00000000000000000000000000000000";
            env(tx, ticket::use(ticketSeq), ter(temINVALID));
        }
        // Fee too low.
        env(ledgerStateFix::nftPageLinks(alice, alice), ter(telINSUF_FEE_P));

        // Invalid flags.
        auto const linkFixFee = drops(env.current()->fees().increment);
        env(ledgerStateFix::nftPageLinks(alice, alice),
            fee(linkFixFee),
            txflags(tfPassive),
            ter(temINVALID_FLAG));

        {
            // ledgerStateFix::nftPageLinks requires an Owner field.
            Json::Value tx = ledgerStateFix::nftPageLinks(alice, alice);
            tx.removeMember(sfOwner.jsonName);
            env(tx, fee(linkFixFee), ter(temINVALID));
        }
        {
            // Invalid LedgerFixType codes.
            Json::Value tx = ledgerStateFix::nftPageLinks(alice, alice);
            tx[sfLedgerFixType.jsonName] = 0;
            env(tx, fee(linkFixFee), ter(tefINVALID_LEDGER_FIX_TYPE));

            tx[sfLedgerFixType.jsonName] = 200;
            env(tx, fee(linkFixFee), ter(tefINVALID_LEDGER_FIX_TYPE));
        }

        // Preclaim
        Account const carol("carol");
        env.memoize(carol);
        env(ledgerStateFix::nftPageLinks(alice, carol),
            fee(linkFixFee),
            ter(tecOBJECT_NOT_FOUND));
    }

    void
    testTokenPageLinkErrors()
    {
        testcase("NFTokenPageLinkFix error cases");

        using namespace test::jtx;

        Account const alice("alice");

        Env env{*this, supported_amendments()};
        env.fund(XRP(1000), alice);

        // These cases all return the same TER code, but they exercise
        // different cases where there is nothing to fix in an owner's
        // NFToken pages. So they increase test coverage.

        // Owner has no pages to fix.
        auto const linkFixFee = drops(env.current()->fees().increment);
        env(ledgerStateFix::nftPageLinks(alice, alice),
            fee(linkFixFee),
            ter(tecFAILED_PROCESSING));

        // Alice has only one page.
        env(token::mint(alice), txflags(tfTransferable));
        env.close();

        env(ledgerStateFix::nftPageLinks(alice, alice),
            fee(linkFixFee),
            ter(tecFAILED_PROCESSING));

        // Alice has at least three pages.
        for (std::uint32_t i = 0; i < 64; ++i)
        {
            env(token::mint(alice), txflags(tfTransferable));
            env.close();
        }

        env(ledgerStateFix::nftPageLinks(alice, alice),
            fee(linkFixFee),
            ter(tecFAILED_PROCESSING));
    }

    void
    testFixNFTokenPageLinks()
    {
        // Steps:
        //  1. Before the fixNFTokenPageLinks amendment is enabled, build the
        //     three kinds of damaged NFToken directories we know about:
        //     A. One where there is only one page, but without the final index.
        //     B. One with multiple pages and a missing final page.
        //     C. One with links missing in the middle of the chain.
        //  2. Enable the fixNFTokenPageLinks amendment.
        //  3. Invoke the LedgerStateFix transactor and repair the directories.
        testcase("Fix links");

        using namespace test::jtx;

        Account const alice("alice");
        Account const bob("bob");
        Account const carol("carol");
        Account const daria("daria");

        Env env{*this, supported_amendments() - fixNFTokenPageLinks};
        env.fund(XRP(1000), alice, bob, carol, daria);

        //**********************************************************************
        // Step 1A: Create damaged NFToken directories:
        //   o One where there is only one page, but without the final index.
        //**********************************************************************

        // alice generates three packed pages.
        std::vector<uint256> aliceNFTs = genPackedTokens(env, alice);
        BEAST_EXPECT(nftCount(env, alice) == 96);
        BEAST_EXPECT(ownerCount(env, alice) == 3);

        // Get the index of the middle page.
        uint256 const aliceMiddleNFTokenPageIndex = [&env, &alice]() {
            auto lastNFTokenPage = env.le(keylet::nftpage_max(alice));
            return lastNFTokenPage->at(sfPreviousPageMin);
        }();

        // alice burns all the tokens in the first and last pages.
        for (int i = 0; i < 32; ++i)
        {
            env(token::burn(alice, {aliceNFTs[i]}));
            env.close();
        }
        aliceNFTs.erase(aliceNFTs.begin(), aliceNFTs.begin() + 32);
        for (int i = 0; i < 32; ++i)
        {
            env(token::burn(alice, {aliceNFTs.back()}));
            aliceNFTs.pop_back();
            env.close();
        }
        BEAST_EXPECT(ownerCount(env, alice) == 1);
        BEAST_EXPECT(nftCount(env, alice) == 32);

        // Removing the last token from the last page deletes the last
        // page. This is a bug. The contents of the next-to-last page
        // should have been moved into the last page.
        BEAST_EXPECT(!env.le(keylet::nftpage_max(alice)));

        // alice's "middle" page is still present, but has no links.
        {
            auto aliceMiddleNFTokenPage = env.le(keylet::nftpage(
                keylet::nftpage_min(alice), aliceMiddleNFTokenPageIndex));
            if (!BEAST_EXPECT(aliceMiddleNFTokenPage))
                return;

            BEAST_EXPECT(
                !aliceMiddleNFTokenPage->isFieldPresent(sfPreviousPageMin));
            BEAST_EXPECT(
                !aliceMiddleNFTokenPage->isFieldPresent(sfNextPageMin));
        }

        //**********************************************************************
        // Step 1B: Create damaged NFToken directories:
        //   o One with multiple pages and a missing final page.
        //**********************************************************************

        // bob generates three packed pages.
        std::vector<uint256> bobNFTs = genPackedTokens(env, bob);
        BEAST_EXPECT(nftCount(env, bob) == 96);
        BEAST_EXPECT(ownerCount(env, bob) == 3);

        // Get the index of the middle page.
        uint256 const bobMiddleNFTokenPageIndex = [&env, &bob]() {
            auto lastNFTokenPage = env.le(keylet::nftpage_max(bob));
            return lastNFTokenPage->at(sfPreviousPageMin);
        }();

        // bob burns all the tokens in the very last page.
        for (int i = 0; i < 32; ++i)
        {
            env(token::burn(bob, {bobNFTs.back()}));
            bobNFTs.pop_back();
            env.close();
        }
        BEAST_EXPECT(nftCount(env, bob) == 64);
        BEAST_EXPECT(ownerCount(env, bob) == 2);

        // Removing the last token from the last page deletes the last
        // page. This is a bug. The contents of the next-to-last page
        // should have been moved into the last page.
        BEAST_EXPECT(!env.le(keylet::nftpage_max(bob)));

        // bob's "middle" page is still present, but has lost the
        // NextPageMin field.
        {
            auto bobMiddleNFTokenPage = env.le(keylet::nftpage(
                keylet::nftpage_min(bob), bobMiddleNFTokenPageIndex));
            if (!BEAST_EXPECT(bobMiddleNFTokenPage))
                return;

            BEAST_EXPECT(
                bobMiddleNFTokenPage->isFieldPresent(sfPreviousPageMin));
            BEAST_EXPECT(!bobMiddleNFTokenPage->isFieldPresent(sfNextPageMin));
        }

        //**********************************************************************
        // Step 1C: Create damaged NFToken directories:
        //   o One with links missing in the middle of the chain.
        //**********************************************************************

        // carol generates three packed pages.
        std::vector<uint256> carolNFTs = genPackedTokens(env, carol);
        BEAST_EXPECT(nftCount(env, carol) == 96);
        BEAST_EXPECT(ownerCount(env, carol) == 3);

        // Get the index of the middle page.
        uint256 const carolMiddleNFTokenPageIndex = [&env, &carol]() {
            auto lastNFTokenPage = env.le(keylet::nftpage_max(carol));
            return lastNFTokenPage->at(sfPreviousPageMin);
        }();

        // carol sells all of the tokens in the very last page to daria.
        std::vector<uint256> dariaNFTs;
        dariaNFTs.reserve(32);
        for (int i = 0; i < 32; ++i)
        {
            uint256 const offerIndex =
                keylet::nftoffer(carol, env.seq(carol)).key;
            env(token::createOffer(carol, carolNFTs.back(), XRP(0)),
                txflags(tfSellNFToken));
            env.close();

            env(token::acceptSellOffer(daria, offerIndex));
            env.close();

            dariaNFTs.push_back(carolNFTs.back());
            carolNFTs.pop_back();
        }
        BEAST_EXPECT(nftCount(env, carol) == 64);
        BEAST_EXPECT(ownerCount(env, carol) == 2);

        // Removing the last token from the last page deletes the last
        // page. This is a bug. The contents of the next-to-last page
        // should have been moved into the last page.
        BEAST_EXPECT(!env.le(keylet::nftpage_max(carol)));

        // carol's "middle" page is still present, but has lost the
        // NextPageMin field.
        auto carolMiddleNFTokenPage = env.le(keylet::nftpage(
            keylet::nftpage_min(carol), carolMiddleNFTokenPageIndex));
        if (!BEAST_EXPECT(carolMiddleNFTokenPage))
            return;

        BEAST_EXPECT(carolMiddleNFTokenPage->isFieldPresent(sfPreviousPageMin));
        BEAST_EXPECT(!carolMiddleNFTokenPage->isFieldPresent(sfNextPageMin));

        // At this point carol's NFT directory has the same problem that
        // bob's has: the last page is missing. Now we make things more
        // complicated by putting the last page back. carol buys their NFTs
        // back from daria.
        for (uint256 const& nft : dariaNFTs)
        {
            uint256 const offerIndex =
                keylet::nftoffer(carol, env.seq(carol)).key;
            env(token::createOffer(carol, nft, drops(1)), token::owner(daria));
            env.close();

            env(token::acceptBuyOffer(daria, offerIndex));
            env.close();

            carolNFTs.push_back(nft);
        }

        // Note that carol actually owns 96 NFTs, but only 64 are reported
        // because the links are damaged.
        BEAST_EXPECT(nftCount(env, carol) == 64);
        BEAST_EXPECT(ownerCount(env, carol) == 3);

        // carol's "middle" page is present and still has no NextPageMin field.
        {
            auto carolMiddleNFTokenPage = env.le(keylet::nftpage(
                keylet::nftpage_min(carol), carolMiddleNFTokenPageIndex));
            if (!BEAST_EXPECT(carolMiddleNFTokenPage))
                return;

            BEAST_EXPECT(
                carolMiddleNFTokenPage->isFieldPresent(sfPreviousPageMin));
            BEAST_EXPECT(
                !carolMiddleNFTokenPage->isFieldPresent(sfNextPageMin));
        }
        // carol has a "last" page again, but it has no PreviousPageMin field.
        {
            auto carolLastNFTokenPage = env.le(keylet::nftpage_max(carol));

            BEAST_EXPECT(
                !carolLastNFTokenPage->isFieldPresent(sfPreviousPageMin));
            BEAST_EXPECT(!carolLastNFTokenPage->isFieldPresent(sfNextPageMin));
        }

        //**********************************************************************
        // Step 2: Enable the fixNFTokenPageLinks amendment.
        //**********************************************************************
        // Verify that the LedgerStateFix transaction is not enabled.
        auto const linkFixFee = drops(env.current()->fees().increment);
        env(ledgerStateFix::nftPageLinks(daria, alice),
            fee(linkFixFee),
            ter(temDISABLED));

        // Wait 15 ledgers so the LedgerStateFix transaction is no longer
        // retried.
        for (int i = 0; i < 15; ++i)
            env.close();

        env.enableFeature(fixNFTokenPageLinks);
        env.close();

        //**********************************************************************
        // Step 3A: Repair the one-page directory (alice's)
        //**********************************************************************

        // Verify that alice's NFToken directory is still damaged.

        // alice's last page should still be missing.
        BEAST_EXPECT(!env.le(keylet::nftpage_max(alice)));

        // alice's "middle" page is still present and has no links.
        {
            auto aliceMiddleNFTokenPage = env.le(keylet::nftpage(
                keylet::nftpage_min(alice), aliceMiddleNFTokenPageIndex));
            if (!BEAST_EXPECT(aliceMiddleNFTokenPage))
                return;

            BEAST_EXPECT(
                !aliceMiddleNFTokenPage->isFieldPresent(sfPreviousPageMin));
            BEAST_EXPECT(
                !aliceMiddleNFTokenPage->isFieldPresent(sfNextPageMin));
        }

        // The server "remembers" daria's failed nftPageLinks transaction
        // signature. So we need to advance daria's sequence number before
        // daria can submit a similar transaction.
        env(noop(daria));

        // daria fixes the links in alice's NFToken directory.
        env(ledgerStateFix::nftPageLinks(daria, alice), fee(linkFixFee));
        env.close();

        // alice's last page should now be present and include no links.
        {
            auto aliceLastNFTokenPage = env.le(keylet::nftpage_max(alice));
            if (!BEAST_EXPECT(aliceLastNFTokenPage))
                return;

            BEAST_EXPECT(
                !aliceLastNFTokenPage->isFieldPresent(sfPreviousPageMin));
            BEAST_EXPECT(!aliceLastNFTokenPage->isFieldPresent(sfNextPageMin));
        }

        // alice's middle page should be gone.
        BEAST_EXPECT(!env.le(keylet::nftpage(
            keylet::nftpage_min(alice), aliceMiddleNFTokenPageIndex)));

        BEAST_EXPECT(nftCount(env, alice) == 32);
        BEAST_EXPECT(ownerCount(env, alice) == 1);

        //**********************************************************************
        // Step 3B: Repair the two-page directory (bob's)
        //**********************************************************************

        // Verify that bob's NFToken directory is still damaged.

        // bob's last page should still be missing.
        BEAST_EXPECT(!env.le(keylet::nftpage_max(bob)));

        // bob's "middle" page is still present and missing NextPageMin.
        {
            auto bobMiddleNFTokenPage = env.le(keylet::nftpage(
                keylet::nftpage_min(bob), bobMiddleNFTokenPageIndex));
            if (!BEAST_EXPECT(bobMiddleNFTokenPage))
                return;

            BEAST_EXPECT(
                bobMiddleNFTokenPage->isFieldPresent(sfPreviousPageMin));
            BEAST_EXPECT(!bobMiddleNFTokenPage->isFieldPresent(sfNextPageMin));
        }

        // daria fixes the links in bob's NFToken directory.
        env(ledgerStateFix::nftPageLinks(daria, bob), fee(linkFixFee));
        env.close();

        // bob's last page should now be present and include a previous
        // link but no next link.
        {
            auto const lastPageKeylet = keylet::nftpage_max(bob);
            auto const bobLastNFTokenPage = env.le(lastPageKeylet);
            if (!BEAST_EXPECT(bobLastNFTokenPage))
                return;

            BEAST_EXPECT(bobLastNFTokenPage->isFieldPresent(sfPreviousPageMin));
            BEAST_EXPECT(
                bobLastNFTokenPage->at(sfPreviousPageMin) !=
                bobMiddleNFTokenPageIndex);
            BEAST_EXPECT(!bobLastNFTokenPage->isFieldPresent(sfNextPageMin));

            auto const bobNewFirstNFTokenPage = env.le(keylet::nftpage(
                keylet::nftpage_min(bob),
                bobLastNFTokenPage->at(sfPreviousPageMin)));
            if (!BEAST_EXPECT(bobNewFirstNFTokenPage))
                return;

            BEAST_EXPECT(
                bobNewFirstNFTokenPage->isFieldPresent(sfNextPageMin) &&
                bobNewFirstNFTokenPage->at(sfNextPageMin) ==
                    lastPageKeylet.key);
            BEAST_EXPECT(
                !bobNewFirstNFTokenPage->isFieldPresent(sfPreviousPageMin));
        }

        // bob's middle page should be gone.
        BEAST_EXPECT(!env.le(keylet::nftpage(
            keylet::nftpage_min(bob), bobMiddleNFTokenPageIndex)));

        BEAST_EXPECT(nftCount(env, bob) == 64);
        BEAST_EXPECT(ownerCount(env, bob) == 2);

        //**********************************************************************
        // Step 3C: Repair the three-page directory (carol's)
        //**********************************************************************

        // Verify that carol's NFToken directory is still damaged.

        // carol's "middle" page is present and has no NextPageMin field.
        {
            auto carolMiddleNFTokenPage = env.le(keylet::nftpage(
                keylet::nftpage_min(carol), carolMiddleNFTokenPageIndex));
            if (!BEAST_EXPECT(carolMiddleNFTokenPage))
                return;

            BEAST_EXPECT(
                carolMiddleNFTokenPage->isFieldPresent(sfPreviousPageMin));
            BEAST_EXPECT(
                !carolMiddleNFTokenPage->isFieldPresent(sfNextPageMin));
        }
        // carol has a "last" page, but it has no PreviousPageMin field.
        {
            auto carolLastNFTokenPage = env.le(keylet::nftpage_max(carol));

            BEAST_EXPECT(
                !carolLastNFTokenPage->isFieldPresent(sfPreviousPageMin));
            BEAST_EXPECT(!carolLastNFTokenPage->isFieldPresent(sfNextPageMin));
        }

        // carol fixes the links in their own NFToken directory.
        env(ledgerStateFix::nftPageLinks(carol, carol), fee(linkFixFee));
        env.close();

        {
            // carol's "middle" page is present and now has a NextPageMin field.
            auto const lastPageKeylet = keylet::nftpage_max(carol);
            auto carolMiddleNFTokenPage = env.le(keylet::nftpage(
                keylet::nftpage_min(carol), carolMiddleNFTokenPageIndex));
            if (!BEAST_EXPECT(carolMiddleNFTokenPage))
                return;

            BEAST_EXPECT(
                carolMiddleNFTokenPage->isFieldPresent(sfPreviousPageMin));
            BEAST_EXPECT(
                carolMiddleNFTokenPage->isFieldPresent(sfNextPageMin) &&
                carolMiddleNFTokenPage->at(sfNextPageMin) ==
                    lastPageKeylet.key);

            // carol has a "last" page that includes a PreviousPageMin field.
            auto carolLastNFTokenPage = env.le(lastPageKeylet);
            if (!BEAST_EXPECT(carolLastNFTokenPage))
                return;

            BEAST_EXPECT(
                carolLastNFTokenPage->isFieldPresent(sfPreviousPageMin) &&
                carolLastNFTokenPage->at(sfPreviousPageMin) ==
                    carolMiddleNFTokenPageIndex);
            BEAST_EXPECT(!carolLastNFTokenPage->isFieldPresent(sfNextPageMin));

            // carol also has a "first" page that includes a NextPageMin field.
            auto carolFirstNFTokenPage = env.le(keylet::nftpage(
                keylet::nftpage_min(carol),
                carolMiddleNFTokenPage->at(sfPreviousPageMin)));
            if (!BEAST_EXPECT(carolFirstNFTokenPage))
                return;

            BEAST_EXPECT(
                carolFirstNFTokenPage->isFieldPresent(sfNextPageMin) &&
                carolFirstNFTokenPage->at(sfNextPageMin) ==
                    carolMiddleNFTokenPageIndex);
            BEAST_EXPECT(
                !carolFirstNFTokenPage->isFieldPresent(sfPreviousPageMin));
        }

        // With the link repair, the server knows that carol has 96 NFTs.
        BEAST_EXPECT(nftCount(env, carol) == 96);
        BEAST_EXPECT(ownerCount(env, carol) == 3);
    }

public:
    void
    run() override
    {
        testLedgerStateFixErrors();
        testTokenPageLinkErrors();
        testFixNFTokenPageLinks();
    }
};

BEAST_DEFINE_TESTSUITE(FixNFTokenPageLinks, tx, ripple);

}  // namespace ripple
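A bit of background that the tests above and below rely on: an owner's NFToken directory is anchored at the page with the owner's maximum page key (keylet::nftpage_max), and pages chain backward through sfPreviousPageMin. A sketch of walking that chain the way the tests do by hand; the include path and the free-function shape are assumptions, while the keylet and field accessors appear verbatim in the diff:

    #include <xrpl/protocol/Indexes.h>

    // Count an account's NFTokenPages by following the backward links
    // from the anchor page.
    std::uint32_t
    countNFTokenPages(ReadView const& view, AccountID const& owner)
    {
        std::uint32_t pages = 0;
        auto page = view.read(keylet::nftpage_max(owner));
        while (page)
        {
            ++pages;
            auto const prev = page->at(~sfPreviousPageMin);
            page = prev
                ? view.read(keylet::nftpage(keylet::nftpage_min(owner), *prev))
                : nullptr;
        }
        return pages;
    }

A directory damaged in the ways exercised above breaks exactly this walk, which is why account_objects and account_nfts under-report until a LedgerStateFix transaction reconnects the pages.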
@@ -80,6 +80,73 @@ class NFTokenBurnBaseUtil_test : public beast::unit_test::suite
        return nftokenID;
    };

    // printNFTPages is a helper function that may be used for debugging.
    //
    // It uses the ledger RPC command to show the NFT pages in the ledger.
    // This parameter controls how noisy the output is.
    enum Volume : bool {
        quiet = false,
        noisy = true,
    };

    void
    printNFTPages(test::jtx::Env& env, Volume vol)
    {
        Json::Value jvParams;
        jvParams[jss::ledger_index] = "current";
        jvParams[jss::binary] = false;
        {
            Json::Value jrr = env.rpc(
                "json",
                "ledger_data",
                boost::lexical_cast<std::string>(jvParams));

            // Iterate the state and print all NFTokenPages.
            if (!jrr.isMember(jss::result) ||
                !jrr[jss::result].isMember(jss::state))
            {
                std::cout << "No ledger state found!" << std::endl;
                return;
            }
            Json::Value& state = jrr[jss::result][jss::state];
            if (!state.isArray())
            {
                std::cout << "Ledger state is not array!" << std::endl;
                return;
            }
            for (Json::UInt i = 0; i < state.size(); ++i)
            {
                if (state[i].isMember(sfNFTokens.jsonName) &&
                    state[i][sfNFTokens.jsonName].isArray())
                {
                    std::uint32_t tokenCount =
                        state[i][sfNFTokens.jsonName].size();
                    std::cout << tokenCount << " NFTokens in page "
                              << state[i][jss::index].asString() << std::endl;

                    if (vol == noisy)
                    {
                        std::cout << state[i].toStyledString() << std::endl;
                    }
                    else
                    {
                        if (tokenCount > 0)
                            std::cout << "first: "
                                      << state[i][sfNFTokens.jsonName][0u]
                                             .toStyledString()
                                      << std::endl;
                        if (tokenCount > 1)
                            std::cout
                                << "last: "
                                << state[i][sfNFTokens.jsonName][tokenCount - 1]
                                       .toStyledString()
                                << std::endl;
                    }
                }
            }
        }
    }

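    // Example use while debugging (illustrative, not part of the commit):
    // a one-line summary per page, or the full page JSON for more detail.
    //   printNFTPages(env, quiet);
    //   printNFTPages(env, noisy);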
    void
    testBurnRandom(FeatureBitset features)
    {
@@ -297,76 +364,10 @@ class NFTokenBurnBaseUtil_test : public beast::unit_test::suite
        Env env{*this, features};
        env.fund(XRP(1000), alice);

        // printNFTPages is a lambda that may be used for debugging.
        //
        // It uses the ledger RPC command to show the NFT pages in the ledger.
        // This parameter controls how noisy the output is.
        enum Volume : bool {
            quiet = false,
            noisy = true,
        };

        [[maybe_unused]] auto printNFTPages = [&env](Volume vol) {
            Json::Value jvParams;
            jvParams[jss::ledger_index] = "current";
            jvParams[jss::binary] = false;
            {
                Json::Value jrr = env.rpc(
                    "json",
                    "ledger_data",
                    boost::lexical_cast<std::string>(jvParams));

                // Iterate the state and print all NFTokenPages.
                if (!jrr.isMember(jss::result) ||
                    !jrr[jss::result].isMember(jss::state))
                {
                    std::cout << "No ledger state found!" << std::endl;
                    return;
                }
                Json::Value& state = jrr[jss::result][jss::state];
                if (!state.isArray())
                {
                    std::cout << "Ledger state is not array!" << std::endl;
                    return;
                }
                for (Json::UInt i = 0; i < state.size(); ++i)
                {
                    if (state[i].isMember(sfNFTokens.jsonName) &&
                        state[i][sfNFTokens.jsonName].isArray())
                    {
                        std::uint32_t tokenCount =
                            state[i][sfNFTokens.jsonName].size();
                        std::cout << tokenCount << " NFTokens in page "
                                  << state[i][jss::index].asString()
                                  << std::endl;

                        if (vol == noisy)
                        {
                            std::cout << state[i].toStyledString() << std::endl;
                        }
                        else
                        {
                            if (tokenCount > 0)
                                std::cout << "first: "
                                          << state[i][sfNFTokens.jsonName][0u]
                                                 .toStyledString()
                                          << std::endl;
                            if (tokenCount > 1)
                                std::cout << "last: "
                                          << state[i][sfNFTokens.jsonName]
                                                     [tokenCount - 1]
                                                 .toStyledString()
                                          << std::endl;
                        }
                    }
                }
            }
        };

        // A lambda that generates 96 NFTs packed into three pages of 32 each.
        auto genPackedTokens = [this, &env, &alice](
                                   std::vector<uint256>& nfts) {
            nfts.clear();
        // Returns a sorted vector of the NFTokenIDs packed into the pages.
        auto genPackedTokens = [this, &env, &alice]() {
            std::vector<uint256> nfts;
            nfts.reserve(96);

            // We want to create fully packed NFT pages. This is a little
@@ -441,23 +442,24 @@ class NFTokenBurnBaseUtil_test : public beast::unit_test::suite
            // has changed.
            BEAST_EXPECT(pageCount == 3);
        }
            return nfts;
        };

        // Generate three packed pages. Then burn the tokens in order from
        // first to last. This exercises specific cases where coalescing
        // pages is not possible.
        std::vector<uint256> nfts;
        genPackedTokens(nfts);
        BEAST_EXPECT(nftCount(env, alice) == 96);
        BEAST_EXPECT(ownerCount(env, alice) == 3);

        for (uint256 const& nft : nfts)
        {
            env(token::burn(alice, {nft}));
            env.close();
            // Generate three packed pages. Then burn the tokens in order from
            // first to last. This exercises specific cases where coalescing
            // pages is not possible.
            std::vector<uint256> nfts = genPackedTokens();
            BEAST_EXPECT(nftCount(env, alice) == 96);
            BEAST_EXPECT(ownerCount(env, alice) == 3);

            for (uint256 const& nft : nfts)
            {
                env(token::burn(alice, {nft}));
                env.close();
            }
            BEAST_EXPECT(nftCount(env, alice) == 0);
            BEAST_EXPECT(ownerCount(env, alice) == 0);
        }
        BEAST_EXPECT(nftCount(env, alice) == 0);
        BEAST_EXPECT(ownerCount(env, alice) == 0);

        // A lambda that verifies the ledger no longer contains any NFT pages.
        auto checkNoTokenPages = [this, &env]() {
@@ -479,48 +481,421 @@ class NFTokenBurnBaseUtil_test : public beast::unit_test::suite
            }
        };
        checkNoTokenPages();

        // Generate three packed pages. Then burn the tokens in order from
        // last to first. This exercises different specific cases where
        // coalescing pages is not possible.
        genPackedTokens(nfts);
        BEAST_EXPECT(nftCount(env, alice) == 96);
        BEAST_EXPECT(ownerCount(env, alice) == 3);

        std::reverse(nfts.begin(), nfts.end());
        for (uint256 const& nft : nfts)
        {
            env(token::burn(alice, {nft}));
            // Generate three packed pages. Then burn the tokens in order from
            // last to first. This exercises different specific cases where
            // coalescing pages is not possible.
            std::vector<uint256> nfts = genPackedTokens();
            BEAST_EXPECT(nftCount(env, alice) == 96);
            BEAST_EXPECT(ownerCount(env, alice) == 3);

            // Verify that all three pages are present and remember the
            // indexes.
            auto lastNFTokenPage = env.le(keylet::nftpage_max(alice));
            if (!BEAST_EXPECT(lastNFTokenPage))
                return;

            uint256 const middleNFTokenPageIndex =
                lastNFTokenPage->at(sfPreviousPageMin);
            auto middleNFTokenPage = env.le(keylet::nftpage(
                keylet::nftpage_min(alice), middleNFTokenPageIndex));
            if (!BEAST_EXPECT(middleNFTokenPage))
                return;

            uint256 const firstNFTokenPageIndex =
                middleNFTokenPage->at(sfPreviousPageMin);
            auto firstNFTokenPage = env.le(keylet::nftpage(
                keylet::nftpage_min(alice), firstNFTokenPageIndex));
            if (!BEAST_EXPECT(firstNFTokenPage))
                return;

            // Burn almost all the tokens in the very last page.
            for (int i = 0; i < 31; ++i)
            {
                env(token::burn(alice, {nfts.back()}));
                nfts.pop_back();
                env.close();
            }

            // Verify that the last page is still present and contains just one
            // NFT.
            lastNFTokenPage = env.le(keylet::nftpage_max(alice));
            if (!BEAST_EXPECT(lastNFTokenPage))
                return;

            BEAST_EXPECT(
                lastNFTokenPage->getFieldArray(sfNFTokens).size() == 1);
            BEAST_EXPECT(lastNFTokenPage->isFieldPresent(sfPreviousPageMin));
            BEAST_EXPECT(!lastNFTokenPage->isFieldPresent(sfNextPageMin));

            // Delete the last token from the last page.
            env(token::burn(alice, {nfts.back()}));
            nfts.pop_back();
            env.close();

            if (features[fixNFTokenPageLinks])
            {
                // Removing the last token from the last page deletes the
                // _previous_ page because we need to preserve that last
                // page as an anchor. The contents of the next-to-last page
                // are moved into the last page.
                lastNFTokenPage = env.le(keylet::nftpage_max(alice));
                BEAST_EXPECT(lastNFTokenPage);
                BEAST_EXPECT(
                    lastNFTokenPage->at(~sfPreviousPageMin) ==
                    firstNFTokenPageIndex);
                BEAST_EXPECT(!lastNFTokenPage->isFieldPresent(sfNextPageMin));
                BEAST_EXPECT(
                    lastNFTokenPage->getFieldArray(sfNFTokens).size() == 32);

                // The "middle" page should be gone.
                middleNFTokenPage = env.le(keylet::nftpage(
                    keylet::nftpage_min(alice), middleNFTokenPageIndex));
                BEAST_EXPECT(!middleNFTokenPage);

                // The "first" page should still be present and linked to
                // the last page.
                firstNFTokenPage = env.le(keylet::nftpage(
                    keylet::nftpage_min(alice), firstNFTokenPageIndex));
                BEAST_EXPECT(firstNFTokenPage);
                BEAST_EXPECT(
                    !firstNFTokenPage->isFieldPresent(sfPreviousPageMin));
                BEAST_EXPECT(
                    firstNFTokenPage->at(~sfNextPageMin) ==
                    lastNFTokenPage->key());
                BEAST_EXPECT(
                    lastNFTokenPage->getFieldArray(sfNFTokens).size() == 32);
            }
            else
            {
                // Removing the last token from the last page deletes the last
                // page. This is a bug. The contents of the next-to-last page
                // should have been moved into the last page.
                lastNFTokenPage = env.le(keylet::nftpage_max(alice));
                BEAST_EXPECT(!lastNFTokenPage);

                // The "middle" page is still present, but has lost the
                // NextPageMin field.
                middleNFTokenPage = env.le(keylet::nftpage(
                    keylet::nftpage_min(alice), middleNFTokenPageIndex));
                if (!BEAST_EXPECT(middleNFTokenPage))
                    return;
                BEAST_EXPECT(
                    middleNFTokenPage->isFieldPresent(sfPreviousPageMin));
                BEAST_EXPECT(!middleNFTokenPage->isFieldPresent(sfNextPageMin));
            }

            // Delete the rest of the NFTokens.
            while (!nfts.empty())
            {
                env(token::burn(alice, {nfts.back()}));
                nfts.pop_back();
                env.close();
            }
            BEAST_EXPECT(nftCount(env, alice) == 0);
            BEAST_EXPECT(ownerCount(env, alice) == 0);
        }
        checkNoTokenPages();
        {
            // Generate three packed pages. Then burn all tokens in the middle
            // page. This exercises the case where a page is removed between
            // two fully populated pages.
            std::vector<uint256> nfts = genPackedTokens();
            BEAST_EXPECT(nftCount(env, alice) == 96);
            BEAST_EXPECT(ownerCount(env, alice) == 3);

            // Verify that all three pages are present and remember the
            // indexes.
            auto lastNFTokenPage = env.le(keylet::nftpage_max(alice));
            if (!BEAST_EXPECT(lastNFTokenPage))
                return;

            uint256 const middleNFTokenPageIndex =
                lastNFTokenPage->at(sfPreviousPageMin);
            auto middleNFTokenPage = env.le(keylet::nftpage(
                keylet::nftpage_min(alice), middleNFTokenPageIndex));
            if (!BEAST_EXPECT(middleNFTokenPage))
                return;

            uint256 const firstNFTokenPageIndex =
                middleNFTokenPage->at(sfPreviousPageMin);
            auto firstNFTokenPage = env.le(keylet::nftpage(
                keylet::nftpage_min(alice), firstNFTokenPageIndex));
            if (!BEAST_EXPECT(firstNFTokenPage))
                return;

            for (std::size_t i = 32; i < 64; ++i)
            {
                env(token::burn(alice, nfts[i]));
                env.close();
            }
            nfts.erase(nfts.begin() + 32, nfts.begin() + 64);
            BEAST_EXPECT(nftCount(env, alice) == 64);
            BEAST_EXPECT(ownerCount(env, alice) == 2);

            // Verify that the middle page is gone and the links in the two
            // remaining pages are correct.
            middleNFTokenPage = env.le(keylet::nftpage(
                keylet::nftpage_min(alice), middleNFTokenPageIndex));
            BEAST_EXPECT(!middleNFTokenPage);

            lastNFTokenPage = env.le(keylet::nftpage_max(alice));
            BEAST_EXPECT(!lastNFTokenPage->isFieldPresent(sfNextPageMin));
            BEAST_EXPECT(
                lastNFTokenPage->getFieldH256(sfPreviousPageMin) ==
                firstNFTokenPageIndex);

            firstNFTokenPage = env.le(keylet::nftpage(
                keylet::nftpage_min(alice), firstNFTokenPageIndex));
            BEAST_EXPECT(
                firstNFTokenPage->getFieldH256(sfNextPageMin) ==
                keylet::nftpage_max(alice).key);
            BEAST_EXPECT(!firstNFTokenPage->isFieldPresent(sfPreviousPageMin));

            // Burn the remaining nfts.
            for (uint256 const& nft : nfts)
            {
                env(token::burn(alice, {nft}));
                env.close();
            }
            BEAST_EXPECT(nftCount(env, alice) == 0);
            BEAST_EXPECT(ownerCount(env, alice) == 0);
        }
        checkNoTokenPages();
        {
            // Generate three packed pages. Then burn all the tokens in the
            // first page followed by all the tokens in the last page. This
            // exercises a specific case where coalescing pages is not possible.
            std::vector<uint256> nfts = genPackedTokens();
            BEAST_EXPECT(nftCount(env, alice) == 96);
            BEAST_EXPECT(ownerCount(env, alice) == 3);

            // Verify that all three pages are present and remember the
            // indexes.
            auto lastNFTokenPage = env.le(keylet::nftpage_max(alice));
            if (!BEAST_EXPECT(lastNFTokenPage))
                return;

            uint256 const middleNFTokenPageIndex =
                lastNFTokenPage->at(sfPreviousPageMin);
            auto middleNFTokenPage = env.le(keylet::nftpage(
                keylet::nftpage_min(alice), middleNFTokenPageIndex));
            if (!BEAST_EXPECT(middleNFTokenPage))
                return;

            uint256 const firstNFTokenPageIndex =
                middleNFTokenPage->at(sfPreviousPageMin);
            auto firstNFTokenPage = env.le(keylet::nftpage(
                keylet::nftpage_min(alice), firstNFTokenPageIndex));
            if (!BEAST_EXPECT(firstNFTokenPage))
                return;

            // Burn all the tokens in the first page.
            std::reverse(nfts.begin(), nfts.end());
            for (int i = 0; i < 32; ++i)
            {
                env(token::burn(alice, {nfts.back()}));
                nfts.pop_back();
                env.close();
            }

            // Verify the first page is gone.
            firstNFTokenPage = env.le(keylet::nftpage(
                keylet::nftpage_min(alice), firstNFTokenPageIndex));
            BEAST_EXPECT(!firstNFTokenPage);

            // Check the links in the other two pages.
            middleNFTokenPage = env.le(keylet::nftpage(
                keylet::nftpage_min(alice), middleNFTokenPageIndex));
            if (!BEAST_EXPECT(middleNFTokenPage))
                return;
            BEAST_EXPECT(!middleNFTokenPage->isFieldPresent(sfPreviousPageMin));
            BEAST_EXPECT(middleNFTokenPage->isFieldPresent(sfNextPageMin));

            lastNFTokenPage = env.le(keylet::nftpage_max(alice));
            if (!BEAST_EXPECT(lastNFTokenPage))
                return;
            BEAST_EXPECT(lastNFTokenPage->isFieldPresent(sfPreviousPageMin));
            BEAST_EXPECT(!lastNFTokenPage->isFieldPresent(sfNextPageMin));

            // Burn all the tokens in the last page.
            std::reverse(nfts.begin(), nfts.end());
            for (int i = 0; i < 32; ++i)
            {
                env(token::burn(alice, {nfts.back()}));
                nfts.pop_back();
                env.close();
            }

            if (features[fixNFTokenPageLinks])
            {
                // Removing the last token from the last page deletes the
                // _previous_ page because we need to preserve that last
                // page as an anchor. The contents of the next-to-last page
                // are moved into the last page.
                lastNFTokenPage = env.le(keylet::nftpage_max(alice));
                BEAST_EXPECT(lastNFTokenPage);
                BEAST_EXPECT(
                    !lastNFTokenPage->isFieldPresent(sfPreviousPageMin));
                BEAST_EXPECT(!lastNFTokenPage->isFieldPresent(sfNextPageMin));
                BEAST_EXPECT(
                    lastNFTokenPage->getFieldArray(sfNFTokens).size() == 32);

                // The "middle" page should be gone.
                middleNFTokenPage = env.le(keylet::nftpage(
                    keylet::nftpage_min(alice), middleNFTokenPageIndex));
                BEAST_EXPECT(!middleNFTokenPage);

                // The "first" page should still be gone.
                firstNFTokenPage = env.le(keylet::nftpage(
                    keylet::nftpage_min(alice), firstNFTokenPageIndex));
                BEAST_EXPECT(!firstNFTokenPage);
            }
            else
            {
                // Removing the last token from the last page deletes the last
                // page. This is a bug. The contents of the next-to-last page
                // should have been moved into the last page.
                lastNFTokenPage = env.le(keylet::nftpage_max(alice));
                BEAST_EXPECT(!lastNFTokenPage);

                // The "middle" page is still present, but has lost the
                // NextPageMin field.
                middleNFTokenPage = env.le(keylet::nftpage(
                    keylet::nftpage_min(alice), middleNFTokenPageIndex));
                if (!BEAST_EXPECT(middleNFTokenPage))
                    return;
                BEAST_EXPECT(
                    !middleNFTokenPage->isFieldPresent(sfPreviousPageMin));
                BEAST_EXPECT(!middleNFTokenPage->isFieldPresent(sfNextPageMin));
            }

            // Delete the rest of the NFTokens.
            while (!nfts.empty())
            {
                env(token::burn(alice, {nfts.back()}));
                nfts.pop_back();
                env.close();
            }
            BEAST_EXPECT(nftCount(env, alice) == 0);
            BEAST_EXPECT(ownerCount(env, alice) == 0);
        }
        BEAST_EXPECT(nftCount(env, alice) == 0);
        BEAST_EXPECT(ownerCount(env, alice) == 0);
        checkNoTokenPages();

        // Generate three packed pages. Then burn all tokens in the middle
        // page. This exercises the case where a page is removed between
        // two fully populated pages.
        genPackedTokens(nfts);
        BEAST_EXPECT(nftCount(env, alice) == 96);
        BEAST_EXPECT(ownerCount(env, alice) == 3);

        for (std::size_t i = 32; i < 64; ++i)
        if (features[fixNFTokenPageLinks])
        {
            env(token::burn(alice, nfts[i]));
            env.close();
        }
        nfts.erase(nfts.begin() + 32, nfts.begin() + 64);
        BEAST_EXPECT(nftCount(env, alice) == 64);
        BEAST_EXPECT(ownerCount(env, alice) == 2);
            // Exercise the invariant that the final NFTokenPage of a directory
            // may not be removed if there are NFTokens in other pages of the
            // directory.
            //
            // We're going to fire an Invariant failure that is difficult to
            // cause. We do it here because the tools are here.
            //
            // See Invariants_test.cpp for examples of other invariant tests
            // that this one is modeled after.

        // Burn the remaining nfts.
        for (uint256 const& nft : nfts)
        {
            env(token::burn(alice, {nft}));
            env.close();
            // Generate three closely packed NFTokenPages.
            std::vector<uint256> nfts = genPackedTokens();
            BEAST_EXPECT(nftCount(env, alice) == 96);
            BEAST_EXPECT(ownerCount(env, alice) == 3);

            // Burn almost all the tokens in the very last page.
            for (int i = 0; i < 31; ++i)
            {
                env(token::burn(alice, {nfts.back()}));
                nfts.pop_back();
                env.close();
            }
            {
                // Create an ApplyContext we can use to run the invariant
                // checks. These variables must outlive the ApplyContext.
                OpenView ov{*env.current()};
                STTx tx{ttACCOUNT_SET, [](STObject&) {}};
                test::StreamSink sink{beast::severities::kWarning};
                beast::Journal jlog{sink};
                ApplyContext ac{
                    env.app(),
                    ov,
                    tx,
                    tesSUCCESS,
                    env.current()->fees().base,
                    tapNONE,
                    jlog};

                // Verify that the last page is present and contains one NFT.
                auto lastNFTokenPage =
                    ac.view().peek(keylet::nftpage_max(alice));
                if (!BEAST_EXPECT(lastNFTokenPage))
                    return;
                BEAST_EXPECT(
                    lastNFTokenPage->getFieldArray(sfNFTokens).size() == 1);

                // Erase that last page.
                ac.view().erase(lastNFTokenPage);

                // Exercise the invariant.
                TER terActual = tesSUCCESS;
                for (TER const& terExpect :
                     {TER(tecINVARIANT_FAILED), TER(tefINVARIANT_FAILED)})
                {
                    terActual = ac.checkInvariants(terActual, XRPAmount{});
                    BEAST_EXPECT(terExpect == terActual);
                    BEAST_EXPECT(
                        sink.messages().str().starts_with("Invariant failed:"));
                    // uncomment to log the invariant failure message
                    // log << " --> " << sink.messages().str() << std::endl;
                    BEAST_EXPECT(
                        sink.messages().str().find(
                            "Last NFT page deleted with non-empty directory") !=
                        std::string::npos);
                }
            }
            {
                // Create an ApplyContext we can use to run the invariant
                // checks. These variables must outlive the ApplyContext.
                OpenView ov{*env.current()};
                STTx tx{ttACCOUNT_SET, [](STObject&) {}};
                test::StreamSink sink{beast::severities::kWarning};
                beast::Journal jlog{sink};
                ApplyContext ac{
                    env.app(),
                    ov,
                    tx,
                    tesSUCCESS,
                    env.current()->fees().base,
                    tapNONE,
                    jlog};

                // Verify that the middle page is present.
                auto lastNFTokenPage =
                    ac.view().peek(keylet::nftpage_max(alice));
                auto middleNFTokenPage = ac.view().peek(keylet::nftpage(
                    keylet::nftpage_min(alice),
                    lastNFTokenPage->getFieldH256(sfPreviousPageMin)));
                BEAST_EXPECT(middleNFTokenPage);

                // Remove the NextMinPage link from the middle page to fire
                // the invariant.
                middleNFTokenPage->makeFieldAbsent(sfNextPageMin);
                ac.view().update(middleNFTokenPage);

                // Exercise the invariant.
                TER terActual = tesSUCCESS;
                for (TER const& terExpect :
                     {TER(tecINVARIANT_FAILED), TER(tefINVARIANT_FAILED)})
                {
                    terActual = ac.checkInvariants(terActual, XRPAmount{});
                    BEAST_EXPECT(terExpect == terActual);
                    BEAST_EXPECT(
                        sink.messages().str().starts_with("Invariant failed:"));
                    // uncomment to log the invariant failure message
                    // log << " --> " << sink.messages().str() << std::endl;
                    BEAST_EXPECT(
                        sink.messages().str().find("Lost NextMinPage link") !=
                        std::string::npos);
                }
            }
        }
        BEAST_EXPECT(nftCount(env, alice) == 0);
        checkNoTokenPages();
    }
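    // Hedged summary of the two invariants exercised above (the quoted
    // messages come from the tests; the enforcement code itself is not
    // shown in this diff): the anchor page at keylet::nftpage_max may not
    // be deleted while other NFTokenPages remain ("Last NFT page deleted
    // with non-empty directory"), and an interior page may not drop its
    // forward link ("Lost NextMinPage link").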
|
||||
void
|
||||
@@ -778,12 +1153,238 @@ class NFTokenBurnBaseUtil_test : public beast::unit_test::suite
        }
    }

    void
    exerciseBrokenLinks(FeatureBitset features)
    {
        // Amendment fixNFTokenPageLinks prevents the breakage we want
        // to observe.
        if (features[fixNFTokenPageLinks])
            return;

        // There are a couple of directory merging scenarios that can only
        // be tested by inserting and deleting in an ordered fashion.  We do
        // that testing now.
        testcase("Exercise broken links");

        using namespace test::jtx;

        Account const alice{"alice"};
        Account const minter{"minter"};

        Env env{*this, features};
        env.fund(XRP(1000), alice, minter);

        // A lambda that generates 96 nfts packed into three pages of 32
        // each.  Returns a sorted vector of the NFTokenIDs packed into the
        // pages.
        auto genPackedTokens = [this, &env, &alice, &minter]() {
            std::vector<uint256> nfts;
            nfts.reserve(96);

            // We want to create fully packed NFT pages.  This is a little
            // tricky since the system currently in place is inclined to
            // assign consecutive tokens to only 16 entries per page.
            //
            // By manipulating the internal form of the taxon we can force
            // creation of NFT pages that are completely full.  This lambda
            // tells us the taxon value we should pass in in order for the
            // internal representation to match the passed-in value.
            auto internalTaxon = [&env](
                                     Account const& acct,
                                     std::uint32_t taxon) -> std::uint32_t {
                std::uint32_t tokenSeq =
                    env.le(acct)->at(~sfMintedNFTokens).value_or(0);

                // If the fixNFTokenRemint amendment is on, we must
                // add FirstNFTokenSequence.
                if (env.current()->rules().enabled(fixNFTokenRemint))
                    tokenSeq += env.le(acct)
                                    ->at(~sfFirstNFTokenSequence)
                                    .value_or(env.seq(acct));

                return toUInt32(
                    nft::cipheredTaxon(tokenSeq, nft::toTaxon(taxon)));
            };

            for (std::uint32_t i = 0; i < 96; ++i)
            {
                // In order to fill the pages we use the taxon to break them
                // into groups of 16 entries.  By having the internal
                // representation of the taxon go...
                //   0, 3, 2, 5, 4, 7...
                // in sets of 16 NFTs we can get each page to be fully
                // populated.
                std::uint32_t const intTaxon = (i / 16) + (i & 0b10000 ? 2 : 0);
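                // Worked example of the grouping above:
                //   i in [ 0,16) -> 0      i in [48,64) -> 5
                //   i in [16,32) -> 3      i in [64,80) -> 4
                //   i in [32,48) -> 2      i in [80,96) -> 7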
                uint32_t const extTaxon = internalTaxon(minter, intTaxon);
                nfts.push_back(
                    token::getNextID(env, minter, extTaxon, tfTransferable));
                env(token::mint(minter, extTaxon), txflags(tfTransferable));
                env.close();

                // Minter creates an offer for the NFToken.
                uint256 const minterOfferIndex =
                    keylet::nftoffer(minter, env.seq(minter)).key;
                env(token::createOffer(minter, nfts.back(), XRP(0)),
                    txflags(tfSellNFToken));
                env.close();

                // alice accepts the offer.
                env(token::acceptSellOffer(alice, minterOfferIndex));
                env.close();
            }

            // Sort the NFTs so they are listed in storage order, not
            // creation order.
            std::sort(nfts.begin(), nfts.end());

            // Verify that the ledger does indeed contain exactly three
            // pages of NFTs with 32 entries in each page.
            Json::Value jvParams;
            jvParams[jss::ledger_index] = "current";
            jvParams[jss::binary] = false;
            {
                Json::Value jrr = env.rpc(
                    "json",
                    "ledger_data",
                    boost::lexical_cast<std::string>(jvParams));

                Json::Value& state = jrr[jss::result][jss::state];

                int pageCount = 0;
                for (Json::UInt i = 0; i < state.size(); ++i)
                {
                    if (state[i].isMember(sfNFTokens.jsonName) &&
                        state[i][sfNFTokens.jsonName].isArray())
                    {
                        BEAST_EXPECT(
                            state[i][sfNFTokens.jsonName].size() == 32);
                        ++pageCount;
                    }
                }
                // If this check fails then the internal NFT directory logic
                // has changed.
                BEAST_EXPECT(pageCount == 3);
            }
            return nfts;
        };

        // Generate three packed pages.
        std::vector<uint256> nfts = genPackedTokens();
        BEAST_EXPECT(nftCount(env, alice) == 96);
        BEAST_EXPECT(ownerCount(env, alice) == 3);

        // Verify that all three pages are present and remember the
        // indexes.
        auto lastNFTokenPage = env.le(keylet::nftpage_max(alice));
        if (!BEAST_EXPECT(lastNFTokenPage))
            return;

        uint256 const middleNFTokenPageIndex =
            lastNFTokenPage->at(sfPreviousPageMin);
        auto middleNFTokenPage = env.le(keylet::nftpage(
            keylet::nftpage_min(alice), middleNFTokenPageIndex));
        if (!BEAST_EXPECT(middleNFTokenPage))
            return;

        uint256 const firstNFTokenPageIndex =
            middleNFTokenPage->at(sfPreviousPageMin);
        auto firstNFTokenPage = env.le(
            keylet::nftpage(keylet::nftpage_min(alice), firstNFTokenPageIndex));
        if (!BEAST_EXPECT(firstNFTokenPage))
            return;

        // Sell all the tokens in the very last page back to minter.
        std::vector<uint256> last32NFTs;
        for (int i = 0; i < 32; ++i)
        {
            last32NFTs.push_back(nfts.back());
            nfts.pop_back();

            // alice creates an offer for the NFToken.
            uint256 const aliceOfferIndex =
                keylet::nftoffer(alice, env.seq(alice)).key;
            env(token::createOffer(alice, last32NFTs.back(), XRP(0)),
                txflags(tfSellNFToken));
            env.close();

            // minter accepts the offer.
            env(token::acceptSellOffer(minter, aliceOfferIndex));
            env.close();
        }

        // Removing the last token from the last page deletes alice's last
        // page.  This is a bug.  The contents of the next-to-last page
        // should have been moved into the last page.
        lastNFTokenPage = env.le(keylet::nftpage_max(alice));
        BEAST_EXPECT(!lastNFTokenPage);
        BEAST_EXPECT(ownerCount(env, alice) == 2);

        // The "middle" page is still present, but has lost the
        // NextPageMin field.
        middleNFTokenPage = env.le(keylet::nftpage(
            keylet::nftpage_min(alice), middleNFTokenPageIndex));
        if (!BEAST_EXPECT(middleNFTokenPage))
            return;
        BEAST_EXPECT(middleNFTokenPage->isFieldPresent(sfPreviousPageMin));
        BEAST_EXPECT(!middleNFTokenPage->isFieldPresent(sfNextPageMin));

        // Attempt to delete alice's account, but fail because she owns NFTs.
        auto const acctDelFee{drops(env.current()->fees().increment)};
        env(acctdelete(alice, minter),
            fee(acctDelFee),
            ter(tecHAS_OBLIGATIONS));
        env.close();

        // minter sells the last 32 NFTs back to alice.
        for (uint256 nftID : last32NFTs)
        {
            // minter creates an offer for the NFToken.
            uint256 const minterOfferIndex =
                keylet::nftoffer(minter, env.seq(minter)).key;
            env(token::createOffer(minter, nftID, XRP(0)),
                txflags(tfSellNFToken));
            env.close();

            // alice accepts the offer.
            env(token::acceptSellOffer(alice, minterOfferIndex));
            env.close();
        }
        BEAST_EXPECT(ownerCount(env, alice) == 3);  // Three NFTokenPages.

        // alice has an NFToken directory with a broken link in the middle.
        {
            // Try the account_objects RPC command.  Alice's account only
            // shows two NFT pages even though she owns more.
            Json::Value acctObjs = [&env, &alice]() {
                Json::Value params;
                params[jss::account] = alice.human();
                return env.rpc("json", "account_objects", to_string(params));
            }();
            BEAST_EXPECT(!acctObjs.isMember(jss::marker));
            BEAST_EXPECT(
                acctObjs[jss::result][jss::account_objects].size() == 2);
        }
        {
            // Try the account_nfts RPC command.  It only returns 64 NFTs
            // although alice owns 96.
            Json::Value aliceNFTs = [&env, &alice]() {
                Json::Value params;
                params[jss::account] = alice.human();
                params[jss::type] = "state";
                return env.rpc("json", "account_nfts", to_string(params));
            }();
            BEAST_EXPECT(!aliceNFTs.isMember(jss::marker));
            BEAST_EXPECT(
                aliceNFTs[jss::result][jss::account_nfts].size() == 64);
        }
    }
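
    // Looking ahead: once fixNFTokenPageLinks is enabled, a directory
    // broken like alice's can be repaired on-ledger with the new
    // LedgerStateFix transaction.  A minimal sketch using the jtx helper
    // added by this commit (illustrative only; this test returns early
    // when the amendment is enabled):
    //
    //     // The fee is one owner reserve, the same as AccountDelete.
    //     auto const fixFee = drops(env.current()->fees().increment);
    //     env(ledgerStateFix::nftPageLinks(alice, alice), fee(fixFee));
    //     env.close();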

    void
    testWithFeats(FeatureBitset features)
    {
        testBurnRandom(features);
        testBurnSequential(features);
        testBurnTooManyOffers(features);
        exerciseBrokenLinks(features);
    }

protected:
@@ -792,13 +1393,18 @@ protected:
    {
        using namespace test::jtx;
        static FeatureBitset const all{supported_amendments()};
        static FeatureBitset const fixNFTV1_2{fixNonFungibleTokensV1_2};
        static FeatureBitset const fixNFTDir{fixNFTokenDirV1};
        static FeatureBitset const fixNFTRemint{fixNFTokenRemint};
        static FeatureBitset const fixNFTPageLinks{fixNFTokenPageLinks};

        static std::array<FeatureBitset, 4> const feats{
            all - fixNonFungibleTokensV1_2 - fixNFTDir - fixNFTokenRemint,
            all - fixNonFungibleTokensV1_2 - fixNFTokenRemint,
            all - fixNFTokenRemint,
            all};
        static std::array<FeatureBitset, 5> const feats{
            all - fixNFTV1_2 - fixNFTDir - fixNFTRemint - fixNFTPageLinks,
            all - fixNFTV1_2 - fixNFTRemint - fixNFTPageLinks,
            all - fixNFTRemint - fixNFTPageLinks,
            all - fixNFTPageLinks,
            all,
        };

        if (BEAST_EXPECT(instance < feats.size()))
        {
@@ -835,19 +1441,30 @@ public:
    }
};

class NFTokenBurnWOFixNFTPageLinks_test : public NFTokenBurnBaseUtil_test
{
public:
    void
    run() override
    {
        NFTokenBurnBaseUtil_test::run(3);
    }
};

class NFTokenBurnAllFeatures_test : public NFTokenBurnBaseUtil_test
{
public:
    void
    run() override
    {
        NFTokenBurnBaseUtil_test::run(3, true);
        NFTokenBurnBaseUtil_test::run(4, true);
    }
};

BEAST_DEFINE_TESTSUITE_PRIO(NFTokenBurnBaseUtil, tx, ripple, 3);
BEAST_DEFINE_TESTSUITE_PRIO(NFTokenBurnWOfixFungTokens, tx, ripple, 3);
BEAST_DEFINE_TESTSUITE_PRIO(NFTokenBurnWOFixTokenRemint, tx, ripple, 3);
BEAST_DEFINE_TESTSUITE_PRIO(NFTokenBurnWOFixNFTPageLinks, tx, ripple, 3);
BEAST_DEFINE_TESTSUITE_PRIO(NFTokenBurnAllFeatures, tx, ripple, 3);

} // namespace ripple

@@ -40,6 +40,7 @@
#include <test/jtx/invoice_id.h>
#include <test/jtx/jtx_json.h>
#include <test/jtx/last_ledger_sequence.h>
#include <test/jtx/ledgerStateFix.h>
#include <test/jtx/memo.h>
#include <test/jtx/multisign.h>
#include <test/jtx/noop.h>

src/test/jtx/impl/ledgerStateFix.cpp (new file, 49 lines)
@@ -0,0 +1,49 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2024 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <test/jtx/ledgerStateFix.h>

#include <xrpld/app/tx/detail/LedgerStateFix.h>
#include <xrpl/protocol/TxFlags.h>
#include <xrpl/protocol/jss.h>

namespace ripple {
namespace test {
namespace jtx {

namespace ledgerStateFix {

// Fix NFTokenPage links on owner's account.  acct pays the fee.
Json::Value
nftPageLinks(jtx::Account const& acct, jtx::Account const& owner)
{
    Json::Value jv;
    jv[sfAccount.jsonName] = acct.human();
    jv[sfLedgerFixType.jsonName] = LedgerStateFix::nfTokenPageLink;
    jv[sfOwner.jsonName] = owner.human();
    jv[sfTransactionType.jsonName] = jss::LedgerStateFix;
    jv[sfFlags.jsonName] = tfUniversal;
    return jv;
}

} // namespace ledgerStateFix

} // namespace jtx
} // namespace test
} // namespace ripple
src/test/jtx/ledgerStateFix.h (new file, 44 lines)
@@ -0,0 +1,44 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2024 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#ifndef RIPPLE_TEST_JTX_LEDGER_STATE_FIX_H_INCLUDED
#define RIPPLE_TEST_JTX_LEDGER_STATE_FIX_H_INCLUDED

#include <test/jtx/Account.h>
#include <test/jtx/Env.h>

namespace ripple {
namespace test {
namespace jtx {

/** LedgerStateFix operations. */
namespace ledgerStateFix {

/** Repair the links in an NFToken directory. */
Json::Value
nftPageLinks(jtx::Account const& acct, jtx::Account const& owner);

}  // namespace ledgerStateFix

}  // namespace jtx
}  // namespace test
}  // namespace ripple

#endif
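
Usage sketch for this helper (assuming a jtx Env named env and funded accounts
acct and owner, as in the tests elsewhere in this commit). The submitter and
the owner of the damaged directory need not be the same account, so any funded
account can pay to repair any owner's NFToken directory:

    // acct pays the one-owner-reserve fee to repair owner's directory.
    env(ledgerStateFix::nftPageLinks(acct, owner),
        fee(drops(env.current()->fees().increment)));
    env.close();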
@@ -24,7 +24,9 @@
#include <xrpld/app/tx/detail/ApplyContext.h>
#include <xrpld/app/tx/detail/Transactor.h>
#include <xrpl/beast/utility/Journal.h>
#include <xrpl/protocol/InnerObjectFormats.h>
#include <xrpl/protocol/STLedgerEntry.h>

#include <boost/algorithm/string/predicate.hpp>

namespace ripple {
@@ -110,10 +112,9 @@ class Invariants_test : public beast::unit_test::suite
            terActual = ac.checkInvariants(terActual, fee);
            BEAST_EXPECT(terExpect == terActual);
            BEAST_EXPECT(
                boost::starts_with(
                    sink.messages().str(), "Invariant failed:") ||
                boost::starts_with(
                    sink.messages().str(), "Transaction caused an exception"));
                sink.messages().str().starts_with("Invariant failed:") ||
                sink.messages().str().starts_with(
                    "Transaction caused an exception"));
            // uncomment if you want to log the invariant failure message
            // log << " --> " << sink.messages().str() << std::endl;
            for (auto const& m : expect_logs)
@@ -650,6 +651,153 @@ class Invariants_test : public beast::unit_test::suite
            STTx{ttPAYMENT, [](STObject& tx) {}});
    }

    void
    testNFTokenPageInvariants()
    {
        using namespace test::jtx;
        testcase << "NFTokenPage";

        // lambda that returns an STArray of NFTokenIDs.
        uint256 const firstNFTID(
            "0000000000000000000000000000000000000001FFFFFFFFFFFFFFFF00000000");
        auto makeNFTokenIDs = [&firstNFTID](unsigned int nftCount) {
            SOTemplate const* nfTokenTemplate =
                InnerObjectFormats::getInstance().findSOTemplateBySField(
                    sfNFToken);

            uint256 nftID(firstNFTID);
            STArray ret;
            for (int i = 0; i < nftCount; ++i)
            {
                STObject newNFToken(
                    *nfTokenTemplate, sfNFToken, [&nftID](STObject& object) {
                        object.setFieldH256(sfNFTokenID, nftID);
                    });
                ret.push_back(std::move(newNFToken));
                ++nftID;
            }
            return ret;
        };

        doInvariantCheck(
            {{"NFT page has invalid size"}},
            [&makeNFTokenIDs](
                Account const& A1, Account const&, ApplyContext& ac) {
                auto nftPage = std::make_shared<SLE>(keylet::nftpage_max(A1));
                nftPage->setFieldArray(sfNFTokens, makeNFTokenIDs(0));

                ac.view().insert(nftPage);
                return true;
            });

        doInvariantCheck(
            {{"NFT page has invalid size"}},
            [&makeNFTokenIDs](
                Account const& A1, Account const&, ApplyContext& ac) {
                auto nftPage = std::make_shared<SLE>(keylet::nftpage_max(A1));
                nftPage->setFieldArray(sfNFTokens, makeNFTokenIDs(33));

                ac.view().insert(nftPage);
                return true;
            });

        doInvariantCheck(
            {{"NFTs on page are not sorted"}},
            [&makeNFTokenIDs](
                Account const& A1, Account const&, ApplyContext& ac) {
                STArray nfTokens = makeNFTokenIDs(2);
                std::iter_swap(nfTokens.begin(), nfTokens.begin() + 1);

                auto nftPage = std::make_shared<SLE>(keylet::nftpage_max(A1));
                nftPage->setFieldArray(sfNFTokens, nfTokens);

                ac.view().insert(nftPage);
                return true;
            });

        doInvariantCheck(
            {{"NFT contains empty URI"}},
            [&makeNFTokenIDs](
                Account const& A1, Account const&, ApplyContext& ac) {
                STArray nfTokens = makeNFTokenIDs(1);
                nfTokens[0].setFieldVL(sfURI, Blob{});

                auto nftPage = std::make_shared<SLE>(keylet::nftpage_max(A1));
                nftPage->setFieldArray(sfNFTokens, nfTokens);

                ac.view().insert(nftPage);
                return true;
            });

        doInvariantCheck(
            {{"NFT page is improperly linked"}},
            [&makeNFTokenIDs](
                Account const& A1, Account const&, ApplyContext& ac) {
                auto nftPage = std::make_shared<SLE>(keylet::nftpage_max(A1));
                nftPage->setFieldArray(sfNFTokens, makeNFTokenIDs(1));
                nftPage->setFieldH256(
                    sfPreviousPageMin, keylet::nftpage_max(A1).key);

                ac.view().insert(nftPage);
                return true;
            });

        doInvariantCheck(
            {{"NFT page is improperly linked"}},
            [&makeNFTokenIDs](
                Account const& A1, Account const& A2, ApplyContext& ac) {
                auto nftPage = std::make_shared<SLE>(keylet::nftpage_max(A1));
                nftPage->setFieldArray(sfNFTokens, makeNFTokenIDs(1));
                nftPage->setFieldH256(
                    sfPreviousPageMin, keylet::nftpage_min(A2).key);

                ac.view().insert(nftPage);
                return true;
            });

        doInvariantCheck(
            {{"NFT page is improperly linked"}},
            [&makeNFTokenIDs](
                Account const& A1, Account const&, ApplyContext& ac) {
                auto nftPage = std::make_shared<SLE>(keylet::nftpage_max(A1));
                nftPage->setFieldArray(sfNFTokens, makeNFTokenIDs(1));
                nftPage->setFieldH256(sfNextPageMin, nftPage->key());

                ac.view().insert(nftPage);
                return true;
            });

        doInvariantCheck(
            {{"NFT page is improperly linked"}},
            [&makeNFTokenIDs](
                Account const& A1, Account const& A2, ApplyContext& ac) {
                STArray nfTokens = makeNFTokenIDs(1);
                auto nftPage = std::make_shared<SLE>(keylet::nftpage(
                    keylet::nftpage_max(A1),
                    ++(nfTokens[0].getFieldH256(sfNFTokenID))));
                nftPage->setFieldArray(sfNFTokens, std::move(nfTokens));
                nftPage->setFieldH256(
                    sfNextPageMin, keylet::nftpage_max(A2).key);

                ac.view().insert(nftPage);
                return true;
            });

        doInvariantCheck(
            {{"NFT found in incorrect page"}},
            [&makeNFTokenIDs](
                Account const& A1, Account const&, ApplyContext& ac) {
                STArray nfTokens = makeNFTokenIDs(2);
                auto nftPage = std::make_shared<SLE>(keylet::nftpage(
                    keylet::nftpage_max(A1),
                    (nfTokens[1].getFieldH256(sfNFTokenID))));
                nftPage->setFieldArray(sfNFTokens, std::move(nfTokens));

                ac.view().insert(nftPage);
                return true;
            });
    }

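    // For orientation: the four "NFT page is improperly linked" cases
    // above exercise, in order, a PreviousPageMin that points at the page
    // itself, a PreviousPageMin that points into another account's
    // directory, a NextPageMin that points at the page itself, and a
    // NextPageMin on an interior page that points into another account's
    // last page.
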
public:
    void
    run() override
@@ -664,6 +812,7 @@ public:
        testNoBadOffers();
        testNoZeroEscrow();
        testValidNewAccountRoot();
        testNFTokenPageInvariants();
    }
};

@@ -612,6 +612,10 @@ ValidNFTokenPage::visitEntry(
    static constexpr uint256 const& pageBits = nft::pageMask;
    static constexpr uint256 const accountBits = ~pageBits;

    if ((before && before->getType() != ltNFTOKEN_PAGE) ||
        (after && after->getType() != ltNFTOKEN_PAGE))
        return;

    auto check = [this, isDelete](std::shared_ptr<SLE const> const& sle) {
        uint256 const account = sle->key() & accountBits;
        uint256 const hiLimit = sle->key() & pageBits;
@@ -673,11 +677,37 @@ ValidNFTokenPage::visitEntry(
        }
    };

    if (before && before->getType() == ltNFTOKEN_PAGE)
    if (before)
    {
        check(before);

        if (after && after->getType() == ltNFTOKEN_PAGE)
        // While an account's NFToken directory contains any NFTokens, the
        // last NFTokenPage (with 96 bits of 1 in the low part of the index)
        // should never be deleted.
        if (isDelete && (before->key() & nft::pageMask) == nft::pageMask &&
            before->isFieldPresent(sfPreviousPageMin))
        {
            deletedFinalPage_ = true;
        }
    }

    if (after)
        check(after);

    if (!isDelete && before && after)
    {
        // If the NFTokenPage
        //  1. has a NextMinPage field in before, but loses it in after, and
        //  2. is not the last page in the directory,
        // then we have identified a corruption in the links between the
        // NFToken pages in the NFToken directory.
        if ((before->key() & nft::pageMask) != nft::pageMask &&
            before->isFieldPresent(sfNextPageMin) &&
            !after->isFieldPresent(sfNextPageMin))
        {
            deletedLink_ = true;
        }
    }
}

bool
@@ -718,6 +748,21 @@ ValidNFTokenPage::finalize(
        return false;
    }

    if (view.rules().enabled(fixNFTokenPageLinks))
    {
        if (deletedFinalPage_)
        {
            JLOG(j.fatal()) << "Invariant failed: Last NFT page deleted with "
                               "non-empty directory.";
            return false;
        }
        if (deletedLink_)
        {
            JLOG(j.fatal()) << "Invariant failed: Lost NextMinPage link.";
            return false;
        }
    }

    return true;
}
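
Both new checks in finalize are gated on fixNFTokenPageLinks being enabled, so
ledgers that already contain directories damaged this way (created before the
amendment) are not rejected retroactively; only transactions that would
introduce such damage after activation fail the invariant.
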
@@ -367,6 +367,8 @@ class ValidNFTokenPage
    bool badSort_ = false;
    bool badURI_ = false;
    bool invalidSize_ = false;
    bool deletedFinalPage_ = false;
    bool deletedLink_ = false;

public:
    void

src/xrpld/app/tx/detail/LedgerStateFix.cpp (new file, 99 lines)
@@ -0,0 +1,99 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2024 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <xrpld/app/tx/detail/LedgerStateFix.h>

#include <xrpld/app/tx/detail/NFTokenUtils.h>
#include <xrpld/ledger/View.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/Protocol.h>
#include <xrpl/protocol/TxFlags.h>

namespace ripple {

NotTEC
LedgerStateFix::preflight(PreflightContext const& ctx)
{
    if (!ctx.rules.enabled(fixNFTokenPageLinks))
        return temDISABLED;

    if (ctx.tx.getFlags() & tfUniversalMask)
        return temINVALID_FLAG;

    if (auto const ret = preflight1(ctx); !isTesSuccess(ret))
        return ret;

    switch (ctx.tx[sfLedgerFixType])
    {
        case FixType::nfTokenPageLink:
            if (!ctx.tx.isFieldPresent(sfOwner))
                return temINVALID;
            break;

        default:
            return tefINVALID_LEDGER_FIX_TYPE;
    }

    return preflight2(ctx);
}

XRPAmount
LedgerStateFix::calculateBaseFee(ReadView const& view, STTx const& tx)
{
    // The fee required for LedgerStateFix is one owner reserve, just like
    // the fee for AccountDelete.
    return view.fees().increment;
}

TER
LedgerStateFix::preclaim(PreclaimContext const& ctx)
{
    switch (ctx.tx[sfLedgerFixType])
    {
        case FixType::nfTokenPageLink: {
            AccountID const owner{ctx.tx[sfOwner]};
            if (!ctx.view.read(keylet::account(owner)))
                return tecOBJECT_NOT_FOUND;

            return tesSUCCESS;
        }
    }

    // preflight is supposed to verify that only valid FixTypes get to
    // preclaim.
    return tecINTERNAL;
}

TER
LedgerStateFix::doApply()
{
    switch (ctx_.tx[sfLedgerFixType])
    {
        case FixType::nfTokenPageLink:
            if (!nft::repairNFTokenDirectoryLinks(view(), ctx_.tx[sfOwner]))
                return tecFAILED_PROCESSING;

            return tesSUCCESS;
    }

    // preflight is supposed to verify that only valid FixTypes get to
    // doApply.
    return tecINTERNAL;
}

} // namespace ripple
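
Summary of the failure paths above: preflight returns temDISABLED while the
amendment is off, temINVALID_FLAG for non-universal flags, temINVALID when the
nfTokenPageLink fix type lacks an Owner, and tefINVALID_LEDGER_FIX_TYPE for an
unrecognized LedgerFixType. preclaim returns tecOBJECT_NOT_FOUND when the
owner's account does not exist, and doApply returns tecFAILED_PROCESSING when
repairNFTokenDirectoryLinks reports that no repair took place.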
src/xrpld/app/tx/detail/LedgerStateFix.h (new file, 57 lines)
@@ -0,0 +1,57 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2024 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#ifndef RIPPLE_TX_LEDGER_STATE_FIX_H_INCLUDED
#define RIPPLE_TX_LEDGER_STATE_FIX_H_INCLUDED

#include <xrpld/app/tx/detail/Transactor.h>
#include <xrpl/basics/Log.h>
#include <xrpl/protocol/Indexes.h>

namespace ripple {

class LedgerStateFix : public Transactor
{
public:
    enum FixType : std::uint16_t {
        nfTokenPageLink = 1,
    };

    static constexpr ConsequencesFactoryType ConsequencesFactory{Normal};

    explicit LedgerStateFix(ApplyContext& ctx) : Transactor(ctx)
    {
    }

    static NotTEC
    preflight(PreflightContext const& ctx);

    static XRPAmount
    calculateBaseFee(ReadView const& view, STTx const& tx);

    static TER
    preclaim(PreclaimContext const& ctx);

    TER
    doApply() override;
};

} // namespace ripple

#endif
@@ -429,10 +429,48 @@ removeToken(

        return tesSUCCESS;
    }

    // The page is empty, so we can just unlink it and then remove it.
    if (prev)
    {
        // Make our previous page point to our next page:
        // With fixNFTokenPageLinks...
        // The page is empty and there is a prev.  If the last page of the
        // directory is empty then we need to:
        //  1. Move the contents of the previous page into the last page.
        //  2. Fix up the link from prev's previous page.
        //  3. Fix up the owner count.
        //  4. Erase the previous page.
        if (view.rules().enabled(fixNFTokenPageLinks) &&
            ((curr->key() & nft::pageMask) == pageMask))
        {
            // Copy all relevant information from prev to curr.
            curr->peekFieldArray(sfNFTokens) = prev->peekFieldArray(sfNFTokens);

            if (auto const prevLink = prev->at(~sfPreviousPageMin))
            {
                curr->at(sfPreviousPageMin) = *prevLink;

                // Also fix up the NextPageMin link in the new Previous.
                auto const newPrev = loadPage(curr, sfPreviousPageMin);
                newPrev->at(sfNextPageMin) = curr->key();
                view.update(newPrev);
            }
            else
            {
                curr->makeFieldAbsent(sfPreviousPageMin);
            }

            adjustOwnerCount(
                view,
                view.peek(keylet::account(owner)),
                -1,
                beast::Journal{beast::Journal::getNullSink()});

            view.update(curr);
            view.erase(prev);
            return tesSUCCESS;
        }

        // The page is empty and not the last page, so we can just unlink
        // it and then remove it.
        if (next)
            prev->setFieldH256(sfNextPageMin, next->key());
        else
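
The net effect of the new branch above is that, while the directory still
contains NFTokens, the page with the all-ones index always survives: when it
empties, the previous page's contents are merged into it instead of the page
being deleted. That is exactly the condition the new deletedFinalPage_
invariant asserts.
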
@@ -637,6 +675,124 @@ deleteTokenOffer(ApplyView& view, std::shared_ptr<SLE> const& offer)
    return true;
}

bool
repairNFTokenDirectoryLinks(ApplyView& view, AccountID const& owner)
{
    bool didRepair = false;

    auto const last = keylet::nftpage_max(owner);

    std::shared_ptr<SLE> page = view.peek(Keylet(
        ltNFTOKEN_PAGE,
        view.succ(keylet::nftpage_min(owner).key, last.key.next())
            .value_or(last.key)));

    if (!page)
        return didRepair;

    if (page->key() == last.key)
    {
        // There's only one page in this entire directory.  There should be
        // no links on that page.
        bool const nextPresent = page->isFieldPresent(sfNextPageMin);
        bool const prevPresent = page->isFieldPresent(sfPreviousPageMin);
        if (nextPresent || prevPresent)
        {
            didRepair = true;
            if (prevPresent)
                page->makeFieldAbsent(sfPreviousPageMin);
            if (nextPresent)
                page->makeFieldAbsent(sfNextPageMin);
            view.update(page);
        }
        return didRepair;
    }

    // First page is not the same as last page.  The first page should not
    // contain a previous link.
    if (page->isFieldPresent(sfPreviousPageMin))
    {
        didRepair = true;
        page->makeFieldAbsent(sfPreviousPageMin);
        view.update(page);
    }

    std::shared_ptr<SLE> nextPage;
    while (
        (nextPage = view.peek(Keylet(
             ltNFTOKEN_PAGE,
             view.succ(page->key().next(), last.key.next())
                 .value_or(last.key)))))
    {
        if (!page->isFieldPresent(sfNextPageMin) ||
            page->getFieldH256(sfNextPageMin) != nextPage->key())
        {
            didRepair = true;
            page->setFieldH256(sfNextPageMin, nextPage->key());
            view.update(page);
        }

        if (!nextPage->isFieldPresent(sfPreviousPageMin) ||
            nextPage->getFieldH256(sfPreviousPageMin) != page->key())
        {
            didRepair = true;
            nextPage->setFieldH256(sfPreviousPageMin, page->key());
            view.update(nextPage);
        }

        if (nextPage->key() == last.key)
            // We need special handling for the last page.
            break;

        page = nextPage;
    }

    // When we arrive here, nextPage should have the same index as last.
    // If not, then that's something we need to fix.
    if (!nextPage)
    {
        // It turns out that page is the last page for this owner, but
        // that last page does not have the expected final index.  We need
        // to move the contents of the current last page into a page with
        // the correct index.
        //
        // The owner count does not need to change because, even though
        // we're adding a page, we'll also remove the page that used to be
        // last.
        didRepair = true;
        nextPage = std::make_shared<SLE>(last);

        // Copy all relevant information from the old last page (page) to
        // its replacement (nextPage).
        nextPage->peekFieldArray(sfNFTokens) = page->peekFieldArray(sfNFTokens);

        if (auto const prevLink = page->at(~sfPreviousPageMin))
        {
            nextPage->at(sfPreviousPageMin) = *prevLink;

            // Also fix up the NextPageMin link in the new Previous.
            auto const newPrev = view.peek(Keylet(ltNFTOKEN_PAGE, *prevLink));
            if (!newPrev)
                Throw<std::runtime_error>(
                    "NFTokenPage directory for " + to_string(owner) +
                    " cannot be repaired.  Unexpected link problem.");
            newPrev->at(sfNextPageMin) = nextPage->key();
            view.update(newPrev);
        }
        view.erase(page);
        view.insert(nextPage);
        return didRepair;
    }

    assert(nextPage);
    if (nextPage->isFieldPresent(sfNextPageMin))
    {
        didRepair = true;
        nextPage->makeFieldAbsent(sfNextPageMin);
        view.update(nextPage);
    }
    return didRepair;
}
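
// A note on the traversal above: all of an owner's NFTokenPage keys share
// the owner's high bits and differ only in the low 96 "page" bits (all ones
// for the last page), so view.succ(page->key().next(), last.key.next())
// yields the next page in strict index order without consulting the
// possibly-corrupt NextPageMin/PreviousPageMin fields.  That is what lets
// the loop rebuild the very links it is walking past.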

NotTEC
tokenOfferCreatePreflight(
    AccountID const& acctID,

@@ -95,6 +95,13 @@ removeToken(
bool
deleteTokenOffer(ApplyView& view, std::shared_ptr<SLE> const& offer);

/** Repairs the links in an NFTokenPage directory.

    Returns true if a repair took place, otherwise false.
*/
bool
repairNFTokenDirectoryLinks(ApplyView& view, AccountID const& owner);

bool
compareTokens(uint256 const& a, uint256 const& b);

@@ -38,6 +38,7 @@
#include <xrpld/app/tx/detail/DeleteOracle.h>
#include <xrpld/app/tx/detail/DepositPreauth.h>
#include <xrpld/app/tx/detail/Escrow.h>
#include <xrpld/app/tx/detail/LedgerStateFix.h>
#include <xrpld/app/tx/detail/NFTokenAcceptOffer.h>
#include <xrpld/app/tx/detail/NFTokenBurn.h>
#include <xrpld/app/tx/detail/NFTokenCancelOffer.h>
@@ -97,6 +98,8 @@ with_txn_type(TxType txnType, F&& f)
            return f.template operator()<EscrowFinish>();
        case ttESCROW_CANCEL:
            return f.template operator()<EscrowCancel>();
        case ttLEDGER_STATE_FIX:
            return f.template operator()<LedgerStateFix>();
        case ttPAYCHAN_CLAIM:
            return f.template operator()<PayChanClaim>();
        case ttPAYCHAN_CREATE: