Compare commits

..

39 Commits

Author SHA1 Message Date
Ed Hennis
a8c03e2e6c Merge branch 'develop' into ximinez/acquireAsyncDispatch 2026-02-03 16:08:06 -04:00
Jingchen
6c1a92fe93 refactor: Add ServiceRegistry to help modularization (#6222)
Currently we pass the `Application` object around, and in practice the `Application` class acts more like a service registry that gives other classes access to other services. To allow modularization, we should replace `Application` with a dedicated service registry class, so that modules which depend on `Application` only for access to other services can be moved out easily. This change adds the `ServiceRegistry` class.
2026-02-03 19:08:27 +00:00
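A minimal sketch of the pattern this commit describes: a component that only needs service access takes the narrower `ServiceRegistry` interface instead of `Application`. The component name `LedgerGapScanner` and the include path are assumptions for illustration; only the accessors shown in the new header later in this diff (`getLedgerMaster()`, `getJobQueue()`) are relied on.

#include <xrpl/core/ServiceRegistry.h>  // assumed path, per the include guard

// Hypothetical consumer of the new interface: it needs service access,
// not Application lifecycle management, so it holds a ServiceRegistry&.
class LedgerGapScanner
{
public:
    explicit LedgerGapScanner(xrpl::ServiceRegistry& services) : services_(services)
    {
    }

    void
    scan()
    {
        // Resolve only the services this component actually uses.
        auto& master = services_.getLedgerMaster();
        auto& jobs = services_.getJobQueue();
        (void)master;  // placeholder: a real component would schedule work
        (void)jobs;    // on the JobQueue that inspects the LedgerMaster
    }

private:
    xrpl::ServiceRegistry& services_;
};

A module written against this interface no longer needs to see `Application` at all, which is what makes it easy to move, per the commit description.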
Copilot
7813683091 fix: Deletes expired NFToken offers from ledger (#5707)
This change introduces the `fixExpiredNFTokenOfferRemoval` amendment that allows expired offers to pass through `preclaim()` and be deleted in `doApply()`, following the same pattern used for expired credentials.
2026-02-03 16:37:24 +00:00
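A hedged sketch of what the preclaim()/doApply() split above amounts to. The helper name `handleExpiredOffer` is invented for illustration and headers are omitted; only `view.rules().enabled()`, `view.peek()`, `view.erase()`, and the `fixExpiredNFTokenOfferRemoval` feature constant are assumed, and the real transactor also performs directory and owner-count cleanup.

// Illustrative only; not the actual NFTokenAcceptOffer code.
TER
handleExpiredOffer(ApplyView& view, Keylet const& offerKeylet)
{
    if (!view.rules().enabled(fixExpiredNFTokenOfferRemoval))
        return tecEXPIRED;  // pre-amendment: preclaim() rejects and the
                            // expired offer object stays in the ledger

    // post-amendment: preclaim() lets the expired offer through so that
    // doApply() can erase it; the transaction still results in tecEXPIRED,
    // but the stale offer is removed, as with expired credentials
    if (auto const sle = view.peek(offerKeylet))
        view.erase(sle);
    return tecEXPIRED;
}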
Ed Hennis
2167a66bc7 Fix formatting 2026-01-28 19:39:15 -05:00
Ed Hennis
ed948a858c Merge branch 'develop' into ximinez/acquireAsyncDispatch 2026-01-28 18:49:15 -04:00
Ed Hennis
608c102743 Merge commit '5f638f55536def0d88b970d1018a465a238e55f4' into ximinez/acquireAsyncDispatch
* commit '5f638f55536def0d88b970d1018a465a238e55f4':
  chore: Set ColumnLimit to 120 in clang-format (6288)
2026-01-28 17:47:53 -05:00
Ed Hennis
36d1607a4e Merge commit '92046785d1fea5f9efe5a770d636792ea6cab78b' into ximinez/acquireAsyncDispatch
* commit '92046785d1fea5f9efe5a770d636792ea6cab78b':
  test: Fix the `xrpl.net` unit test using async read (6241)
  ci: Upload Conan recipes for develop, release candidates, and releases (6286)
  fix: Stop embedded tests from hanging on ARM by using `atomic_flag` (6248)
  fix: Remove DEFAULT fields that change to the default in associateAsset (6259) (6273)
  refactor: Update Boost to 1.90 (6280)
  refactor: clean up uses of `std::source_location` (6272)
  ci: Pass missing sanitizers input to actions (6266)
  ci: Properly propagate Conan credentials (6265)
  ci: Explicitly set version when exporting the Conan recipe (6264)
  ci: Use plus instead of hyphen for Conan recipe version suffix (6261)
  chore: Detect uninitialized variables in CMake files (6247)
  ci: Run on-trigger and on-pr when generate-version is modified (6257)
  refactor: Enforce 15-char limit and simplify labels for thread naming (6212)
  docs: Update Ripple Bug Bounty public key (6258)
  ci: Add missing commit hash to Conan recipe version (6256)
  fix: Include `<functional>` header in `Number.h` (6254)
  ci: Upload Conan recipe for merges into develop and commits to release (6235)
  Limit reply size on `TMGetObjectByHash` queries (6110)
  ci: remove 'master' branch as a trigger (6234)
  Improve ledger_entry lookups for fee, amendments, NUNL, and hashes (5644)
2026-01-28 17:47:47 -05:00
Ed Hennis
53ebb86d60 Merge branch 'develop' into ximinez/acquireAsyncDispatch 2026-01-15 13:03:36 -04:00
Ed Hennis
1d989bc6de Merge branch 'develop' into ximinez/acquireAsyncDispatch 2026-01-15 12:06:00 -04:00
Ed Hennis
64c0cb8c7e Merge branch 'develop' into ximinez/acquireAsyncDispatch 2026-01-13 18:19:11 -04:00
Ed Hennis
c77cfef41c Merge branch 'develop' into ximinez/acquireAsyncDispatch 2026-01-13 15:28:01 -04:00
Ed Hennis
08aa8c06d1 Merge branch 'develop' into ximinez/acquireAsyncDispatch 2026-01-12 14:52:16 -04:00
Ed Hennis
9498672f8e Merge branch 'develop' into ximinez/acquireAsyncDispatch 2026-01-11 00:50:43 -04:00
Ed Hennis
e91d55a0e0 Merge branch 'develop' into ximinez/acquireAsyncDispatch 2026-01-08 17:06:11 -04:00
Ed Hennis
afdc452cfc Merge branch 'develop' into ximinez/acquireAsyncDispatch 2026-01-08 13:04:20 -04:00
Ed Hennis
a0d4ef1a54 Merge branch 'develop' into ximinez/acquireAsyncDispatch 2026-01-06 14:02:15 -05:00
Ed Hennis
8bc384f8bf Merge branch 'develop' into ximinez/acquireAsyncDispatch 2025-12-22 17:39:59 -05:00
Ed Hennis
bd961c484b Merge branch 'develop' into ximinez/acquireAsyncDispatch 2025-12-18 19:59:52 -05:00
Ed Hennis
aee242a8d4 Merge branch 'develop' into ximinez/acquireAsyncDispatch 2025-12-12 20:34:59 -05:00
Ed Hennis
fcae74de58 Merge remote-tracking branch 'XRPLF/develop' into ximinez/acquireAsyncDispatch
* XRPLF/develop:
  refactor: Rename `ripple` namespace to `xrpl` (5982)
  refactor: Move JobQueue and related classes into xrpl.core module (6121)
  refactor: Rename `rippled` binary to `xrpld` (5983)
  refactor: rename info() to header() (6138)
  refactor: rename `LedgerInfo` to `LedgerHeader` (6136)
  refactor: clean up `RPCHelpers` (5684)
  chore: Fix docs readme and cmake (6122)
  chore: Clean up .gitignore and .gitattributes (6001)
  chore: Use updated secp256k1 recipe (6118)
2025-12-11 15:33:12 -05:00
Ed Hennis
a56effcb00 Merge branch 'develop' into ximinez/acquireAsyncDispatch 2025-12-05 21:13:10 -05:00
Ed Hennis
64c2eca465 Merge branch 'develop' into ximinez/acquireAsyncDispatch 2025-12-02 17:37:29 -05:00
Ed Hennis
e56f750e1d Merge branch 'develop' into ximinez/acquireAsyncDispatch 2025-12-01 14:40:45 -05:00
Ed Hennis
fde000f3eb Merge branch 'develop' into ximinez/acquireAsyncDispatch 2025-11-28 15:46:44 -05:00
Ed Hennis
d0a62229da Merge branch 'develop' into ximinez/acquireAsyncDispatch 2025-11-27 01:48:56 -05:00
Ed Hennis
d5932cc7d4 Merge branch 'develop' into ximinez/acquireAsyncDispatch 2025-11-26 00:25:17 -05:00
Ed Hennis
0b534da781 Merge branch 'develop' into ximinez/acquireAsyncDispatch 2025-11-25 14:55:06 -05:00
Ed Hennis
71a70d343b Merge branch 'develop' into ximinez/acquireAsyncDispatch 2025-11-24 21:49:11 -05:00
Ed Hennis
0899e65030 Merge branch 'develop' into ximinez/acquireAsyncDispatch 2025-11-24 21:30:22 -05:00
Ed Hennis
31ba529761 Merge branch 'develop' into ximinez/acquireAsyncDispatch 2025-11-21 12:47:58 -05:00
Ed Hennis
e2c6e5ebb6 Merge branch 'develop' into ximinez/acquireAsyncDispatch 2025-11-18 22:39:29 -05:00
Ed Hennis
9d807fce48 Merge branch 'develop' into ximinez/acquireAsyncDispatch 2025-11-15 03:08:41 -05:00
Ed Hennis
9ef160765c Merge branch 'develop' into ximinez/acquireAsyncDispatch 2025-11-13 12:19:21 -05:00
Ed Hennis
d6c0eb243b Merge branch 'develop' into ximinez/acquireAsyncDispatch 2025-11-12 14:12:55 -05:00
Ed Hennis
84c9fc123c Fix formatting 2025-11-10 19:53:05 -05:00
Ed Hennis
00a2a58cfa Add missing header 2025-11-10 19:53:05 -05:00
Ed Hennis
bb2098d873 Add a unit test for CanProcess
- Delete the copy ctor & operator
2025-11-10 19:53:05 -05:00
Ed Hennis
46a5bc74db refactor: acquireAsync will dispatch the job, not the other way around 2025-11-10 19:53:05 -05:00
Ed Hennis
7b72b9cc82 Improve job queue collision checks and logging
- Improve logging related to ledger acquisition and operating mode
  changes
- Add class "CanProcess" to keep track of processing of distinct items
2025-11-10 19:53:05 -05:00
24 changed files with 940 additions and 306 deletions

View File

@@ -153,6 +153,7 @@ tests.libxrpl > xrpl.json
tests.libxrpl > xrpl.net
xrpl.core > xrpl.basics
xrpl.core > xrpl.json
xrpl.core > xrpl.ledger
xrpl.json > xrpl.basics
xrpl.ledger > xrpl.basics
xrpl.ledger > xrpl.protocol

View File

@@ -22,12 +22,6 @@ API version 2 is available in `rippled` version 2.0.0 and later. See [API-VERSIO
This version is supported by all `rippled` versions. For WebSocket and HTTP JSON-RPC requests, it is currently the default API version used when no `api_version` is specified.
## Unreleased
### Bugfixes
- Peer Crawler: The `port` field in `overlay.active[]` now consistently returns an integer instead of a string for outbound peers. [#6318](https://github.com/XRPLF/rippled/pull/6318)
## XRP Ledger server version 3.1.0
[Version 3.1.0](https://github.com/XRPLF/rippled/releases/tag/3.1.0) was released on Jan 27, 2026.

View File

@@ -0,0 +1,138 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2024 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_BASICS_CANPROCESS_H_INCLUDED
#define RIPPLE_BASICS_CANPROCESS_H_INCLUDED
#include <functional>
#include <mutex>
#include <set>
/** RAII class to check if an Item is already being processed on another thread,
* as indicated by its presence in a Collection.
*
* If the Item is not in the Collection, it will be added under lock in the
* ctor, and removed under lock in the dtor. The object will be considered
* "usable" and evaluate to `true`.
*
* If the Item is in the Collection, no changes will be made to the collection,
* and the CanProcess object will be considered "unusable".
*
* It's up to the caller to decide what "usable" and "unusable" mean (e.g.,
* process or skip a block of code, or set a flag).
*
* The current use is to avoid lock contention that would be involved in
* processing something associated with the Item.
*
* Examples:
*
* void IncomingLedgers::acquireAsync(LedgerHash const& hash, ...)
* {
* if (CanProcess check{acquiresMutex_, pendingAcquires_, hash})
* {
* acquire(hash, ...);
* }
* }
*
* bool
* NetworkOPsImp::recvValidation(
* std::shared_ptr<STValidation> const& val,
* std::string const& source)
* {
* CanProcess check(
* validationsMutex_, pendingValidations_, val->getLedgerHash());
* BypassAccept bypassAccept =
* check ? BypassAccept::no : BypassAccept::yes;
* handleNewValidation(app_, val, source, bypassAccept, m_journal);
* }
*
*/
class CanProcess
{
public:
template <class Mutex, class Collection, class Item>
CanProcess(Mutex& mtx, Collection& collection, Item const& item) : cleanup_(insert(mtx, collection, item))
{
}
~CanProcess()
{
if (cleanup_)
cleanup_();
}
CanProcess(CanProcess const&) = delete;
CanProcess&
operator=(CanProcess const&) = delete;
explicit
operator bool() const
{
return static_cast<bool>(cleanup_);
}
private:
template <bool useIterator, class Mutex, class Collection, class Item>
std::function<void()>
doInsert(Mutex& mtx, Collection& collection, Item const& item)
{
std::unique_lock<Mutex> lock(mtx);
// TODO: Use structured binding once LLVM 16 is the minimum supported
// version. See also: https://github.com/llvm/llvm-project/issues/48582
// https://github.com/llvm/llvm-project/commit/127bf44385424891eb04cff8e52d3f157fc2cb7c
auto const insertResult = collection.insert(item);
auto const it = insertResult.first;
if (!insertResult.second)
return {};
if constexpr (useIterator)
return [&, it]() {
std::unique_lock<Mutex> lock(mtx);
collection.erase(it);
};
else
return [&]() {
std::unique_lock<Mutex> lock(mtx);
collection.erase(item);
};
}
// Generic insert() function doesn't use iterators because they may get
// invalidated
template <class Mutex, class Collection, class Item>
std::function<void()>
insert(Mutex& mtx, Collection& collection, Item const& item)
{
return doInsert<false>(mtx, collection, item);
}
// Specialize insert() for std::set, which does not invalidate iterators for
// insert and erase
template <class Mutex, class Item>
std::function<void()>
insert(Mutex& mtx, std::set<Item>& collection, Item const& item)
{
return doInsert<true>(mtx, collection, item);
}
// If set, then the item is "usable"
std::function<void()> cleanup_;
};
#endif
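A self-contained usage sketch of the new class (not part of the change; the include path follows the one used by the unit test later in this diff):

#include <xrpl/basics/CanProcess.h>

#include <iostream>
#include <mutex>
#include <set>

int
main()
{
    std::mutex mtx;
    std::set<int> pending;

    CanProcess first(mtx, pending, 42);   // inserts 42 under lock: "usable"
    CanProcess second(mtx, pending, 42);  // 42 already pending: "unusable"

    std::cout << std::boolalpha << bool(first) << ' ' << bool(second) << '\n';
    // prints: true false
    // When `first` goes out of scope, 42 is erased under lock, and a new
    // CanProcess for 42 would be "usable" again.
}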

View File

@@ -0,0 +1,202 @@
#ifndef XRPL_CORE_SERVICEREGISTRY_H_INCLUDED
#define XRPL_CORE_SERVICEREGISTRY_H_INCLUDED
#include <xrpl/basics/Blob.h>
#include <xrpl/basics/SHAMapHash.h>
#include <xrpl/basics/TaggedCache.h>
#include <xrpl/ledger/CachedSLEs.h>
namespace xrpl {
// Forward declarations
namespace NodeStore {
class Database;
}
namespace Resource {
class Manager;
}
namespace perf {
class PerfLog;
}
class AcceptedLedger;
class AmendmentTable;
class Cluster;
class CollectorManager;
class DatabaseCon;
class Family;
class HashRouter;
class InboundLedgers;
class InboundTransactions;
class JobQueue;
class LedgerCleaner;
class LedgerMaster;
class LedgerReplayer;
class LoadFeeTrack;
class LoadManager;
class ManifestCache;
class NetworkOPs;
class OpenLedger;
class OrderBookDB;
class Overlay;
class PathRequests;
class PeerReservationTable;
class PendingSaves;
class RelationalDatabase;
class ServerHandler;
class SHAMapStore;
class TimeKeeper;
class TransactionMaster;
class TxQ;
class ValidatorList;
class ValidatorSite;
template <class Adaptor>
class Validations;
class RCLValidationsAdaptor;
using RCLValidations = Validations<RCLValidationsAdaptor>;
using NodeCache = TaggedCache<SHAMapHash, Blob>;
/** Service registry for dependency injection.
This abstract interface provides access to various services and components
used throughout the application. It separates the service locator pattern
from the Application lifecycle management.
Components that need access to services can hold a reference to
ServiceRegistry rather than Application when they only need service
access and not lifecycle management.
*/
class ServiceRegistry
{
public:
ServiceRegistry() = default;
virtual ~ServiceRegistry() = default;
// Core infrastructure services
virtual CollectorManager&
getCollectorManager() = 0;
virtual Family&
getNodeFamily() = 0;
virtual TimeKeeper&
timeKeeper() = 0;
virtual JobQueue&
getJobQueue() = 0;
virtual NodeCache&
getTempNodeCache() = 0;
virtual CachedSLEs&
cachedSLEs() = 0;
// Protocol and validation services
virtual AmendmentTable&
getAmendmentTable() = 0;
virtual HashRouter&
getHashRouter() = 0;
virtual LoadFeeTrack&
getFeeTrack() = 0;
virtual LoadManager&
getLoadManager() = 0;
virtual RCLValidations&
getValidations() = 0;
virtual ValidatorList&
validators() = 0;
virtual ValidatorSite&
validatorSites() = 0;
virtual ManifestCache&
validatorManifests() = 0;
virtual ManifestCache&
publisherManifests() = 0;
// Network services
virtual Overlay&
overlay() = 0;
virtual Cluster&
cluster() = 0;
virtual PeerReservationTable&
peerReservations() = 0;
virtual Resource::Manager&
getResourceManager() = 0;
// Storage services
virtual NodeStore::Database&
getNodeStore() = 0;
virtual SHAMapStore&
getSHAMapStore() = 0;
virtual RelationalDatabase&
getRelationalDatabase() = 0;
// Ledger services
virtual InboundLedgers&
getInboundLedgers() = 0;
virtual InboundTransactions&
getInboundTransactions() = 0;
virtual TaggedCache<uint256, AcceptedLedger>&
getAcceptedLedgerCache() = 0;
virtual LedgerMaster&
getLedgerMaster() = 0;
virtual LedgerCleaner&
getLedgerCleaner() = 0;
virtual LedgerReplayer&
getLedgerReplayer() = 0;
virtual PendingSaves&
pendingSaves() = 0;
virtual OpenLedger&
openLedger() = 0;
virtual OpenLedger const&
openLedger() const = 0;
// Transaction and operation services
virtual NetworkOPs&
getOPs() = 0;
virtual OrderBookDB&
getOrderBookDB() = 0;
virtual TransactionMaster&
getMasterTransaction() = 0;
virtual TxQ&
getTxQ() = 0;
virtual PathRequests&
getPathRequests() = 0;
// Server services
virtual ServerHandler&
getServerHandler() = 0;
virtual perf::PerfLog&
getPerfLog() = 0;
};
} // namespace xrpl
#endif

View File

@@ -36,6 +36,8 @@ struct LedgerHeader
// If validated is false, it means "not yet validated."
// Once validated is true, it will never be set false at a later time.
// NOTE: If you are accessing this directly, you are probably doing it
// wrong. Use LedgerMaster::isValidated().
// VFALCO TODO Make this not mutable
bool mutable validated = false;
bool accepted = false;

View File

@@ -16,6 +16,7 @@
// Add new amendments to the top of this list.
// Keep it sorted in reverse chronological order.
XRPL_FIX (ExpiredNFTokenOfferRemoval, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (BatchInnerSigs, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(LendingProtocol, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(PermissionDelegationV1_1, Supported::no, VoteBehavior::DefaultNo)

View File

@@ -112,7 +112,6 @@ JSS(accounts); // in: LedgerEntry, Subscribe,
// handlers/Ledger, Unsubscribe
JSS(accounts_proposed); // in: Subscribe, Unsubscribe
JSS(action);
JSS(active); // out: OverlayImpl
JSS(acquiring); // out: LedgerRequest
JSS(address); // out: PeerImp
JSS(affected); // out: AcceptedLedgerTx
@@ -300,7 +299,6 @@ JSS(id); // websocket.
JSS(ident); // in: AccountCurrencies, AccountInfo,
// OwnerInfo
JSS(ignore_default); // in: AccountLines
JSS(in); // out: OverlayImpl
JSS(inLedger); // out: tx/Transaction
JSS(inbound); // out: PeerImp
JSS(index); // in: LedgerEntry
@@ -462,7 +460,6 @@ JSS(open_ledger_fee); // out: TxQ
JSS(open_ledger_level); // out: TxQ
JSS(oracles); // in: get_aggregate_price
JSS(oracle_document_id); // in: get_aggregate_price
JSS(out); // out: OverlayImpl
JSS(owner); // in: LedgerEntry, out: NetworkOPs
JSS(owner_funds); // in/out: Ledger, NetworkOPs, AcceptedLedgerTx
JSS(page_index);

View File

@@ -81,7 +81,12 @@ public:
}
virtual void
acquireAsync(uint256 const& hash, std::uint32_t seq, InboundLedger::Reason reason) override
acquireAsync(
JobType type,
std::string const& name,
uint256 const& hash,
std::uint32_t seq,
InboundLedger::Reason reason) override
{
}

View File

@@ -876,42 +876,48 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env.fund(XRP(1000), alice, buyer, gw);
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 0);
BEAST_EXPECT(ownerCount(env, buyer) == 0);
uint256 const nftAlice0ID = token::getNextID(env, alice, 0, tfTransferable);
env(token::mint(alice, 0u), txflags(tfTransferable));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 1);
uint8_t aliceCount = 1;
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
uint256 const nftXrpOnlyID = token::getNextID(env, alice, 0, tfOnlyXRP | tfTransferable);
env(token::mint(alice, 0), txflags(tfOnlyXRP | tfTransferable));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 1);
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
uint256 nftNoXferID = token::getNextID(env, alice, 0);
env(token::mint(alice, 0));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 1);
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
// alice creates sell offers for her nfts.
uint256 const plainOfferIndex = keylet::nftoffer(alice, env.seq(alice)).key;
env(token::createOffer(alice, nftAlice0ID, XRP(10)), txflags(tfSellNFToken));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 2);
aliceCount++;
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
uint256 const audOfferIndex = keylet::nftoffer(alice, env.seq(alice)).key;
env(token::createOffer(alice, nftAlice0ID, gwAUD(30)), txflags(tfSellNFToken));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 3);
aliceCount++;
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
uint256 const xrpOnlyOfferIndex = keylet::nftoffer(alice, env.seq(alice)).key;
env(token::createOffer(alice, nftXrpOnlyID, XRP(20)), txflags(tfSellNFToken));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 4);
aliceCount++;
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
uint256 const noXferOfferIndex = keylet::nftoffer(alice, env.seq(alice)).key;
env(token::createOffer(alice, nftNoXferID, XRP(30)), txflags(tfSellNFToken));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 5);
aliceCount++;
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
// alice creates a sell offer that will expire soon.
uint256 const aliceExpOfferIndex = keylet::nftoffer(alice, env.seq(alice)).key;
@@ -919,7 +925,17 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
txflags(tfSellNFToken),
token::expiration(lastClose(env) + 5));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 6);
aliceCount++;
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
// buyer creates a Buy offer that will expire soon.
uint256 const buyerExpOfferIndex = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftAlice0ID, XRP(40)),
token::owner(alice),
token::expiration(lastClose(env) + 5));
env.close();
uint8_t buyerCount = 1;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
//----------------------------------------------------------------------
// preflight
@@ -927,12 +943,12 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
// Set a negative fee.
env(token::acceptSellOffer(buyer, noXferOfferIndex), fee(STAmount(10ull, true)), ter(temBAD_FEE));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Set an invalid flag.
env(token::acceptSellOffer(buyer, noXferOfferIndex), txflags(0x00008000), ter(temINVALID_FLAG));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Supply neither an sfNFTokenBuyOffer nor an sfNFTokenSellOffer field.
{
@@ -940,7 +956,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
jv.removeMember(sfNFTokenSellOffer.jsonName);
env(jv, ter(temMALFORMED));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
}
// A buy offer may not contain a sfNFTokenBrokerFee field.
@@ -949,7 +965,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
jv[sfNFTokenBrokerFee.jsonName] = STAmount(500000).getJson(JsonOptions::none);
env(jv, ter(temMALFORMED));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
}
// A sell offer may not contain a sfNFTokenBrokerFee field.
@@ -958,7 +974,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
jv[sfNFTokenBrokerFee.jsonName] = STAmount(500000).getJson(JsonOptions::none);
env(jv, ter(temMALFORMED));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
}
// A brokered offer may not contain a negative or zero brokerFee.
@@ -966,7 +982,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
token::brokerFee(gwAUD(0)),
ter(temMALFORMED));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
//----------------------------------------------------------------------
// preclaim
@@ -974,33 +990,48 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
// The buy offer must be non-zero.
env(token::acceptBuyOffer(buyer, beast::zero), ter(tecOBJECT_NOT_FOUND));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// The buy offer must be present in the ledger.
uint256 const missingOfferIndex = keylet::nftoffer(alice, 1).key;
env(token::acceptBuyOffer(buyer, missingOfferIndex), ter(tecOBJECT_NOT_FOUND));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// The buy offer must not have expired.
env(token::acceptBuyOffer(buyer, aliceExpOfferIndex), ter(tecEXPIRED));
// NOTE: this is only a preclaim check with the
// fixExpiredNFTokenOfferRemoval amendment disabled.
env(token::acceptBuyOffer(alice, buyerExpOfferIndex), ter(tecEXPIRED));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
if (features[fixExpiredNFTokenOfferRemoval])
{
buyerCount--;
}
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// The sell offer must be non-zero.
env(token::acceptSellOffer(buyer, beast::zero), ter(tecOBJECT_NOT_FOUND));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// The sell offer must be present in the ledger.
env(token::acceptSellOffer(buyer, missingOfferIndex), ter(tecOBJECT_NOT_FOUND));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// The sell offer must not have expired.
// NOTE: this is only a preclaim check with the
// fixExpiredNFTokenOfferRemoval amendment disabled.
env(token::acceptSellOffer(buyer, aliceExpOfferIndex), ter(tecEXPIRED));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 0);
// Alice's count is decremented by one when the expired offer is
// removed.
if (features[fixExpiredNFTokenOfferRemoval])
{
aliceCount--;
}
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
//----------------------------------------------------------------------
// preclaim brokered
@@ -1012,8 +1043,13 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env.close();
env(pay(gw, buyer, gwAUD(30)));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 7);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
aliceCount++;
buyerCount++;
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// We're about to exercise offer brokering, so we need
// corresponding buy and sell offers.
@@ -1022,35 +1058,38 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
uint256 const buyerOfferIndex = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftAlice0ID, gwAUD(29)), token::owner(alice));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
buyerCount++;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// gw attempts to broker offers that are not for the same token.
env(token::brokerOffers(gw, buyerOfferIndex, xrpOnlyOfferIndex), ter(tecNFTOKEN_BUY_SELL_MISMATCH));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// gw attempts to broker offers that are not for the same currency.
env(token::brokerOffers(gw, buyerOfferIndex, plainOfferIndex), ter(tecNFTOKEN_BUY_SELL_MISMATCH));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// In a brokered offer, the buyer must offer greater than or
// equal to the selling price.
env(token::brokerOffers(gw, buyerOfferIndex, audOfferIndex), ter(tecINSUFFICIENT_PAYMENT));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Remove buyer's offer.
env(token::cancelOffer(buyer, {buyerOfferIndex}));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 1);
buyerCount--;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
}
{
// buyer creates a buy offer for one of alice's nfts.
uint256 const buyerOfferIndex = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftAlice0ID, gwAUD(31)), token::owner(alice));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
buyerCount++;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Broker sets their fee in a denomination other than the one
// used by the offers
@@ -1058,14 +1097,14 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
token::brokerFee(XRP(40)),
ter(tecNFTOKEN_BUY_SELL_MISMATCH));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Broker fee way too big.
env(token::brokerOffers(gw, buyerOfferIndex, audOfferIndex),
token::brokerFee(gwAUD(31)),
ter(tecINSUFFICIENT_PAYMENT));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Broker fee is smaller, but still too big once the offer
// seller's minimum is taken into account.
@@ -1073,12 +1112,13 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
token::brokerFee(gwAUD(1.5)),
ter(tecINSUFFICIENT_PAYMENT));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Remove buyer's offer.
env(token::cancelOffer(buyer, {buyerOfferIndex}));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 1);
buyerCount--;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
}
//----------------------------------------------------------------------
// preclaim buy
@@ -1087,17 +1127,18 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
uint256 const buyerOfferIndex = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftAlice0ID, gwAUD(30)), token::owner(alice));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
buyerCount++;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Don't accept a buy offer if the sell flag is set.
env(token::acceptBuyOffer(buyer, plainOfferIndex), ter(tecNFTOKEN_OFFER_TYPE_MISMATCH));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 7);
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
// An account can't accept its own offer.
env(token::acceptBuyOffer(buyer, buyerOfferIndex), ter(tecCANT_ACCEPT_OWN_NFTOKEN_OFFER));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// An offer acceptor must have enough funds to pay for the offer.
env(pay(buyer, gw, gwAUD(30)));
@@ -1105,7 +1146,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
BEAST_EXPECT(env.balance(buyer, gwAUD) == gwAUD(0));
env(token::acceptBuyOffer(alice, buyerOfferIndex), ter(tecINSUFFICIENT_FUNDS));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// alice gives her NFT to gw, so alice no longer owns nftAlice0.
{
@@ -1114,7 +1155,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env.close();
env(token::acceptSellOffer(gw, offerIndex));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 7);
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
}
env(pay(gw, buyer, gwAUD(30)));
env.close();
@@ -1122,12 +1163,13 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
// alice can't accept a buy offer for an NFT she no longer owns.
env(token::acceptBuyOffer(alice, buyerOfferIndex), ter(tecNO_PERMISSION));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Remove buyer's offer.
env(token::cancelOffer(buyer, {buyerOfferIndex}));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 1);
buyerCount--;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
}
//----------------------------------------------------------------------
// preclaim sell
@@ -1136,23 +1178,24 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
uint256 const buyerOfferIndex = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftXrpOnlyID, XRP(30)), token::owner(alice));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
buyerCount++;
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Don't accept a sell offer without the sell flag set.
env(token::acceptSellOffer(alice, buyerOfferIndex), ter(tecNFTOKEN_OFFER_TYPE_MISMATCH));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 7);
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
// An account can't accept its own offer.
env(token::acceptSellOffer(alice, plainOfferIndex), ter(tecCANT_ACCEPT_OWN_NFTOKEN_OFFER));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// The seller must currently be in possession of the token they
// are selling. alice gave nftAlice0ID to gw.
env(token::acceptSellOffer(buyer, plainOfferIndex), ter(tecNO_PERMISSION));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// gw gives nftAlice0ID back to alice. That allows us to check
// buyer attempting to accept one of alice's offers with
@@ -1163,14 +1206,14 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env.close();
env(token::acceptSellOffer(alice, offerIndex));
env.close();
BEAST_EXPECT(ownerCount(env, alice) == 7);
BEAST_EXPECT(ownerCount(env, alice) == aliceCount);
}
env(pay(buyer, gw, gwAUD(30)));
env.close();
BEAST_EXPECT(env.balance(buyer, gwAUD) == gwAUD(0));
env(token::acceptSellOffer(buyer, audOfferIndex), ter(tecINSUFFICIENT_FUNDS));
env.close();
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
}
//----------------------------------------------------------------------
@@ -2769,6 +2812,7 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
uint256 const nftokenID1 = token::getNextID(env, issuer, 0, tfTransferable);
env(token::mint(minter, 0), token::issuer(issuer), txflags(tfTransferable));
env.close();
uint8_t issuerCount, minterCount, buyerCount;
// Test how adding an Expiration field to an offer affects permissions
// for cancelling offers.
@@ -2792,9 +2836,12 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
uint256 const offerBuyerToMinter = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftokenID0, drops(1)), token::owner(minter), token::expiration(expiration));
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 1);
BEAST_EXPECT(ownerCount(env, minter) == 3);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
issuerCount = 1;
minterCount = 3;
buyerCount = 1;
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Test who gets to cancel the offers. Anyone outside of the
// offer-owner/destination pair should not be able to cancel
@@ -2806,32 +2853,36 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env(token::cancelOffer(buyer, {offerIssuerToMinter}), ter(tecNO_PERMISSION));
env.close();
BEAST_EXPECT(lastClose(env) < expiration);
BEAST_EXPECT(ownerCount(env, issuer) == 1);
BEAST_EXPECT(ownerCount(env, minter) == 3);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// The offer creator can cancel their own unexpired offer.
env(token::cancelOffer(minter, {offerMinterToAnyone}));
minterCount--;
// The destination of a sell offer can cancel the NFT owner's
// unexpired offer.
env(token::cancelOffer(issuer, {offerMinterToIssuer}));
minterCount--;
// Close enough ledgers to get past the expiration.
while (lastClose(env) < expiration)
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 1);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Anyone can cancel expired offers.
env(token::cancelOffer(issuer, {offerBuyerToMinter}));
buyerCount--;
env(token::cancelOffer(buyer, {offerIssuerToMinter}));
issuerCount--;
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
}
// Show that:
// 1. An unexpired sell offer with an expiration can be accepted.
@@ -2844,44 +2895,70 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env(token::createOffer(minter, nftokenID0, drops(1)),
token::expiration(expiration),
txflags(tfSellNFToken));
minterCount++;
uint256 const offer1 = keylet::nftoffer(minter, env.seq(minter)).key;
env(token::createOffer(minter, nftokenID1, drops(1)),
token::expiration(expiration),
txflags(tfSellNFToken));
minterCount++;
env.close();
BEAST_EXPECT(lastClose(env) < expiration);
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 3);
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Anyone can accept an unexpired sell offer.
env(token::acceptSellOffer(buyer, offer0));
minterCount--;
buyerCount++;
// Close enough ledgers to get past the expiration.
while (lastClose(env) < expiration)
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// No one can accept an expired sell offer.
env(token::acceptSellOffer(buyer, offer1), ter(tecEXPIRED));
env(token::acceptSellOffer(issuer, offer1), ter(tecEXPIRED));
// With fixExpiredNFTokenOfferRemoval amendment, the first accept
// attempt deletes the expired offer. Without the amendment,
// the offer remains and we can try to accept it again.
if (features[fixExpiredNFTokenOfferRemoval])
{
// After amendment: offer was deleted by first accept attempt
minterCount--;
env(token::acceptSellOffer(issuer, offer1), ter(tecOBJECT_NOT_FOUND));
}
else
{
// Before amendment: offer still exists, second accept also
// fails
env(token::acceptSellOffer(issuer, offer1), ter(tecEXPIRED));
}
env.close();
// The expired sell offer is still in the ledger.
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
// Check if the expired sell offer behavior matches amendment status
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Anyone can cancel the expired sell offer.
env(token::cancelOffer(issuer, {offer1}));
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
if (!features[fixExpiredNFTokenOfferRemoval])
{
// Before amendment: expired offer still exists and needs to be
// cancelled
env(token::cancelOffer(issuer, {offer1}));
env.close();
minterCount--;
}
// Ensure that owner counts are correct with and without the
// amendment
BEAST_EXPECT(ownerCount(env, issuer) == 0 && issuerCount == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1 && minterCount == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 1 && buyerCount == 1);
// Transfer nftokenID0 back to minter so we start the next test in
// a simple place.
@@ -2889,10 +2966,11 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env(token::createOffer(buyer, nftokenID0, XRP(0)), txflags(tfSellNFToken), token::destination(minter));
env.close();
env(token::acceptSellOffer(minter, offerSellBack));
buyerCount--;
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 0);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
}
// Show that:
// 1. An unexpired buy offer with an expiration can be accepted.
@@ -2903,14 +2981,16 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
uint256 const offer0 = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftokenID0, drops(1)), token::owner(minter), token::expiration(expiration));
buyerCount++;
uint256 const offer1 = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftokenID1, drops(1)), token::owner(minter), token::expiration(expiration));
buyerCount++;
env.close();
BEAST_EXPECT(lastClose(env) < expiration);
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// An unexpired buy offer can be accepted.
env(token::acceptBuyOffer(minter, offer0));
@@ -2919,26 +2999,48 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
while (lastClose(env) < expiration)
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// An expired buy offer cannot be accepted.
env(token::acceptBuyOffer(minter, offer1), ter(tecEXPIRED));
env(token::acceptBuyOffer(issuer, offer1), ter(tecEXPIRED));
// With fixExpiredNFTokenOfferRemoval amendment, the first accept
// attempt deletes the expired offer. Without the amendment,
// the offer remains and we can try to accept it again.
if (features[fixExpiredNFTokenOfferRemoval])
{
// After amendment: offer was deleted by first accept attempt
buyerCount--;
env(token::acceptBuyOffer(issuer, offer1), ter(tecOBJECT_NOT_FOUND));
}
else
{
// Before amendment: offer still exists, second accept also
// fails
env(token::acceptBuyOffer(issuer, offer1), ter(tecEXPIRED));
}
env.close();
// The expired buy offer is still in the ledger.
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
// Check if the expired buy offer behavior matches amendment status
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// Anyone can cancel the expired buy offer.
env(token::cancelOffer(issuer, {offer1}));
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
if (!features[fixExpiredNFTokenOfferRemoval])
{
// Before amendment: expired offer still exists and can be
// cancelled
env(token::cancelOffer(issuer, {offer1}));
env.close();
buyerCount--;
}
// Ensure that owner counts are the same with and without the
// amendment
BEAST_EXPECT(ownerCount(env, issuer) == 0 && issuerCount == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1 && minterCount == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 1 && buyerCount == 1);
// Transfer nftokenID0 back to minter so we start the next test in
// a simple place.
@@ -2947,9 +3049,10 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env.close();
env(token::acceptSellOffer(minter, offerSellBack));
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 0);
buyerCount--;
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
}
// Show that in brokered mode:
// 1. An unexpired sell offer with an expiration can be accepted.
@@ -2962,50 +3065,74 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env(token::createOffer(minter, nftokenID0, drops(1)),
token::expiration(expiration),
txflags(tfSellNFToken));
minterCount++;
uint256 const sellOffer1 = keylet::nftoffer(minter, env.seq(minter)).key;
env(token::createOffer(minter, nftokenID1, drops(1)),
token::expiration(expiration),
txflags(tfSellNFToken));
minterCount++;
uint256 const buyOffer0 = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftokenID0, drops(1)), token::owner(minter));
buyerCount++;
uint256 const buyOffer1 = keylet::nftoffer(buyer, env.seq(buyer)).key;
env(token::createOffer(buyer, nftokenID1, drops(1)), token::owner(minter));
buyerCount++;
env.close();
BEAST_EXPECT(lastClose(env) < expiration);
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 3);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// An unexpired offer can be brokered.
env(token::brokerOffers(issuer, buyOffer0, sellOffer0));
minterCount--;
// Close enough ledgers to get past the expiration.
while (lastClose(env) < expiration)
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
// If the sell offer is expired it cannot be brokered.
env(token::brokerOffers(issuer, buyOffer1, sellOffer1), ter(tecEXPIRED));
env.close();
// The expired sell offer is still in the ledger.
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
if (features[fixExpiredNFTokenOfferRemoval])
{
// With amendment: expired offers are deleted
minterCount--;
}
// Anyone can cancel the expired sell offer.
env(token::cancelOffer(buyer, {buyOffer1, sellOffer1}));
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
if (features[fixExpiredNFTokenOfferRemoval])
{
// The buy offer was deleted, so no need to cancel it
// The sell offer still exists, so we can cancel it
env(token::cancelOffer(buyer, {buyOffer1}));
buyerCount--;
}
else
{
// Anyone can cancel the expired offers
env(token::cancelOffer(buyer, {buyOffer1, sellOffer1}));
minterCount--;
buyerCount--;
}
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
// Ensure that owner counts are the same with and without the
// amendment
BEAST_EXPECT(ownerCount(env, issuer) == 0 && issuerCount == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1 && minterCount == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 1 && buyerCount == 1);
// Transfer nftokenID0 back to minter so we start the next test in
// a simple place.
@@ -3014,9 +3141,10 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
env.close();
env(token::acceptSellOffer(minter, offerSellBack));
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
BEAST_EXPECT(ownerCount(env, buyer) == 0);
buyerCount--;
BEAST_EXPECT(ownerCount(env, issuer) == issuerCount);
BEAST_EXPECT(ownerCount(env, minter) == minterCount);
BEAST_EXPECT(ownerCount(env, buyer) == buyerCount);
}
// Show that in brokered mode:
// 1. An unexpired buy offer with an expiration can be accepted.
@@ -3054,17 +3182,28 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
// If the buy offer is expired it cannot be brokered.
env(token::brokerOffers(issuer, buyOffer1, sellOffer1), ter(tecEXPIRED));
env.close();
// The expired buy offer is still in the ledger.
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
// Anyone can cancel the expired buy offer.
env(token::cancelOffer(minter, {buyOffer1, sellOffer1}));
if (features[fixExpiredNFTokenOfferRemoval])
{
// After amendment: expired offers were deleted during broker
// attempt
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 1);
// The buy offer was deleted, so no need to cancel it
// The sell offer still exists, so we can cancel it
env(token::cancelOffer(minter, {sellOffer1}));
}
else
{
// Before amendment: expired offers still exist in ledger
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
// Anyone can cancel the expired offers
env(token::cancelOffer(minter, {buyOffer1, sellOffer1}));
}
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
@@ -3122,17 +3261,19 @@ class NFTokenBaseUtil_test : public beast::unit_test::suite
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
// If the offers are expired they cannot be brokered.
env(token::brokerOffers(issuer, buyOffer1, sellOffer1), ter(tecEXPIRED));
env.close();
// The expired offers are still in the ledger.
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
// Anyone can cancel the expired offers.
env(token::cancelOffer(issuer, {buyOffer1, sellOffer1}));
if (!features[fixExpiredNFTokenOfferRemoval])
{
// Before amendment: expired offers still exist in ledger
BEAST_EXPECT(ownerCount(env, minter) == 2);
BEAST_EXPECT(ownerCount(env, buyer) == 2);
// Anyone can cancel the expired offers
env(token::cancelOffer(issuer, {buyOffer1, sellOffer1}));
}
env.close();
BEAST_EXPECT(ownerCount(env, issuer) == 0);
BEAST_EXPECT(ownerCount(env, minter) == 1);
@@ -6736,7 +6877,9 @@ public:
void
run() override
{
testWithFeats(allFeatures - fixNFTokenReserve - featureNFTokenMintOffer - featureDynamicNFT);
testWithFeats(
allFeatures - fixNFTokenReserve - featureNFTokenMintOffer - featureDynamicNFT -
fixExpiredNFTokenOfferRemoval);
}
};
@@ -6767,6 +6910,15 @@ class NFTokenWOModify_test : public NFTokenBaseUtil_test
}
};
class NFTokenWOExpiredOfferRemoval_test : public NFTokenBaseUtil_test
{
void
run() override
{
testWithFeats(allFeatures - fixExpiredNFTokenOfferRemoval);
}
};
class NFTokenAllFeatures_test : public NFTokenBaseUtil_test
{
void

View File

@@ -0,0 +1,157 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012-2016 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <xrpl/basics/CanProcess.h>
#include <xrpl/beast/unit_test.h>
#include <cstdint>
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>
namespace ripple {
namespace test {
struct CanProcess_test : beast::unit_test::suite
{
template <class Mutex, class Collection, class Item>
void
test(std::string const& name, Mutex& mtx, Collection& collection, std::vector<Item> const& items)
{
testcase(name);
if (!BEAST_EXPECT(!items.empty()))
return;
if (!BEAST_EXPECT(collection.empty()))
return;
// CanProcess objects can't be copied or moved. To make that easier,
// store shared_ptrs
std::vector<std::shared_ptr<CanProcess>> trackers;
// Fill up the vector with two CanProcess for each Item. The first
// inserts the item into the collection and is "good". The second does
// not and is "bad".
for (int i = 0; i < items.size(); ++i)
{
{
auto const& good = trackers.emplace_back(std::make_shared<CanProcess>(mtx, collection, items[i]));
BEAST_EXPECT(*good);
}
BEAST_EXPECT(trackers.size() == (2 * i) + 1);
BEAST_EXPECT(collection.size() == i + 1);
{
auto const& bad = trackers.emplace_back(std::make_shared<CanProcess>(mtx, collection, items[i]));
BEAST_EXPECT(!*bad);
}
BEAST_EXPECT(trackers.size() == 2 * (i + 1));
BEAST_EXPECT(collection.size() == i + 1);
}
BEAST_EXPECT(collection.size() == items.size());
// Now remove the items from the vector<CanProcess> two at a time, and
// try to get another CanProcess for that item.
for (int i = 0; i < items.size(); ++i)
{
// Remove the "bad" one in the second position
// This will have no effect on the collection
{
auto const iter = trackers.begin() + 1;
BEAST_EXPECT(!**iter);
trackers.erase(iter);
}
BEAST_EXPECT(trackers.size() == (2 * items.size()) - 1);
BEAST_EXPECT(collection.size() == items.size());
{
// Append a new "bad" one
auto const& bad = trackers.emplace_back(std::make_shared<CanProcess>(mtx, collection, items[i]));
BEAST_EXPECT(!*bad);
}
BEAST_EXPECT(trackers.size() == 2 * items.size());
BEAST_EXPECT(collection.size() == items.size());
// Remove the "good" one from the front
{
auto const iter = trackers.begin();
BEAST_EXPECT(**iter);
trackers.erase(iter);
}
BEAST_EXPECT(trackers.size() == (2 * items.size()) - 1);
BEAST_EXPECT(collection.size() == items.size() - 1);
{
// Append a new "good" one
auto const& good = trackers.emplace_back(std::make_shared<CanProcess>(mtx, collection, items[i]));
BEAST_EXPECT(*good);
}
BEAST_EXPECT(trackers.size() == 2 * items.size());
BEAST_EXPECT(collection.size() == items.size());
}
// Now remove them all two at a time
for (int i = items.size() - 1; i >= 0; --i)
{
// Remove the "bad" one from the front
{
auto const iter = trackers.begin();
BEAST_EXPECT(!**iter);
trackers.erase(iter);
}
BEAST_EXPECT(trackers.size() == (2 * i) + 1);
BEAST_EXPECT(collection.size() == i + 1);
// Remove the "good" one now in front
{
auto const iter = trackers.begin();
BEAST_EXPECT(**iter);
trackers.erase(iter);
}
BEAST_EXPECT(trackers.size() == 2 * i);
BEAST_EXPECT(collection.size() == i);
}
BEAST_EXPECT(trackers.empty());
BEAST_EXPECT(collection.empty());
}
void
run() override
{
{
std::mutex m;
std::set<int> collection;
std::vector<int> const items{1, 2, 3, 4, 5};
test("set of int", m, collection, items);
}
{
std::mutex m;
std::set<std::string> collection;
std::vector<std::string> const items{"one", "two", "three", "four", "five"};
test("set of string", m, collection, items);
}
{
std::mutex m;
std::unordered_set<char> collection;
std::vector<char> const items{'1', '2', '3', '4', '5'};
test("unordered_set of char", m, collection, items);
}
{
std::mutex m;
std::unordered_set<std::uint64_t> collection;
std::vector<std::uint64_t> const items{100u, 1000u, 150u, 4u, 0u};
test("unordered_set of uint64_t", m, collection, items);
}
}
};
BEAST_DEFINE_TESTSUITE(CanProcess, ripple_basics, ripple);
} // namespace test
} // namespace ripple

View File

@@ -100,10 +100,7 @@ RCLConsensus::Adaptor::acquireLedger(LedgerHash const& hash)
// Tell the ledger acquire system that we need the consensus ledger
acquiringLedger_ = hash;
app_.getJobQueue().addJob(jtADVANCE, "GetConsL1", [id = hash, &app = app_, this]() {
JLOG(j_.debug()) << "JOB advanceLedger getConsensusLedger1 started";
app.getInboundLedgers().acquireAsync(id, 0, InboundLedger::Reason::CONSENSUS);
});
app_.getInboundLedgers().acquireAsync(jtADVANCE, "GetConsL1", hash, 0, InboundLedger::Reason::CONSENSUS);
}
return std::nullopt;
}
@@ -918,7 +915,7 @@ void
RCLConsensus::Adaptor::updateOperatingMode(std::size_t const positions) const
{
if (!positions && app_.getOPs().isFull())
app_.getOPs().setMode(OperatingMode::CONNECTED);
app_.getOPs().setMode(OperatingMode::CONNECTED, "updateOperatingMode: no positions");
}
void

View File

@@ -109,12 +109,7 @@ RCLValidationsAdaptor::acquire(LedgerHash const& hash)
{
JLOG(j_.warn()) << "Need validated ledger for preferred ledger analysis " << hash;
Application* pApp = &app_;
app_.getJobQueue().addJob(jtADVANCE, "GetConsL2", [pApp, hash, this]() {
JLOG(j_.debug()) << "JOB advanceLedger getConsensusLedger2 started";
pApp->getInboundLedgers().acquireAsync(hash, 0, InboundLedger::Reason::CONSENSUS);
});
app_.getInboundLedgers().acquireAsync(jtADVANCE, "GetConsL2", hash, 0, InboundLedger::Reason::CONSENSUS);
return std::nullopt;
}

View File

@@ -27,7 +27,12 @@ public:
// Queue. TODO review whether all callers of acquire() can use this
// instead. Inbound ledger acquisition is asynchronous anyway.
virtual void
acquireAsync(uint256 const& hash, std::uint32_t seq, InboundLedger::Reason reason) = 0;
acquireAsync(
JobType type,
std::string const& name,
uint256 const& hash,
std::uint32_t seq,
InboundLedger::Reason reason) = 0;
virtual std::shared_ptr<InboundLedger>
find(LedgerHash const& hash) = 0;

View File

@@ -344,7 +344,14 @@ InboundLedger::onTimer(bool wasProgress, ScopedLockType&)
if (!wasProgress)
{
checkLocal();
if (checkLocal())
{
// Done. Something else (probably consensus) built the ledger
// locally while waiting for data (or possibly before requesting)
XRPL_ASSERT(isDone(), "ripple::InboundLedger::onTimer : done");
JLOG(journal_.info()) << "Finished while waiting " << hash_;
return;
}
mByHash = true;

View File

@@ -3,9 +3,9 @@
#include <xrpld/app/main/Application.h>
#include <xrpld/app/misc/NetworkOPs.h>
#include <xrpl/basics/CanProcess.h>
#include <xrpl/basics/DecayingSample.h>
#include <xrpl/basics/Log.h>
#include <xrpl/basics/scope.h>
#include <xrpl/beast/container/aged_map.h>
#include <xrpl/core/JobQueue.h>
#include <xrpl/core/PerfLog.h>
@@ -58,12 +58,15 @@ public:
(reason != InboundLedger::Reason::CONSENSUS))
return {};
std::stringstream ss;
bool isNew = true;
std::shared_ptr<InboundLedger> inbound;
{
ScopedLockType sl(mLock);
if (stopping_)
{
JLOG(j_.debug()) << "Abort(stopping): " << ss.str();
return {};
}
@@ -82,46 +85,60 @@ public:
++mCounter;
}
}
ss << " IsNew: " << (isNew ? "true" : "false");
if (inbound->isFailed())
{
JLOG(j_.debug()) << "Abort(failed): " << ss.str();
return {};
}
if (!isNew)
inbound->update(seq);
if (!inbound->isComplete())
{
JLOG(j_.debug()) << "InProgress: " << ss.str();
return {};
}
JLOG(j_.debug()) << "Complete: " << ss.str();
return inbound->getLedger();
};
using namespace std::chrono_literals;
std::shared_ptr<Ledger const> ledger =
perf::measureDurationAndLog(doAcquire, "InboundLedgersImp::acquire", 500ms, j_);
return ledger;
return perf::measureDurationAndLog(doAcquire, "InboundLedgersImp::acquire", 500ms, j_);
}
void
acquireAsync(uint256 const& hash, std::uint32_t seq, InboundLedger::Reason reason) override
acquireAsync(
JobType type,
std::string const& name,
uint256 const& hash,
std::uint32_t seq,
InboundLedger::Reason reason) override
{
std::unique_lock lock(acquiresMutex_);
try
if (auto check = std::make_shared<CanProcess const>(acquiresMutex_, pendingAcquires_, hash); *check)
{
if (pendingAcquires_.contains(hash))
return;
pendingAcquires_.insert(hash);
scope_unlock unlock(lock);
acquire(hash, seq, reason);
app_.getJobQueue().addJob(type, name, [check, name, hash, seq, reason, this]() {
JLOG(j_.debug()) << "JOB acquireAsync " << name << " started ";
try
{
acquire(hash, seq, reason);
}
catch (std::exception const& e)
{
JLOG(j_.warn()) << "Exception thrown for acquiring new "
"inbound ledger "
<< hash << ": " << e.what();
}
catch (...)
{
JLOG(j_.warn()) << "Unknown exception thrown for acquiring new "
"inbound ledger "
<< hash;
}
});
}
catch (std::exception const& e)
{
JLOG(j_.warn()) << "Exception thrown for acquiring new inbound ledger " << hash << ": " << e.what();
}
catch (...)
{
JLOG(j_.warn()) << "Unknown exception thrown for acquiring new inbound ledger " << hash;
}
pendingAcquires_.erase(hash);
}
std::shared_ptr<InboundLedger>
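
One detail worth noting in the implementation above: the CanProcess guard is allocated as a shared_ptr and captured by the job lambda, so the hash stays registered in pendingAcquires_ until the queued job has run, not just for the duration of the enclosing call. A simplified sketch of that lifetime pattern (names are illustrative):

auto guard = std::make_shared<CanProcess const>(mutex, pending, hash);
if (*guard)
{
    jobQueue.addJob(jtADVANCE, "example", [guard, hash]() {
        // ... do the work; when this lambda is destroyed, the last copy of
        // guard goes away and ~CanProcess() removes hash from pending.
    });
}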

View File

@@ -890,8 +890,8 @@ LedgerMaster::checkAccept(std::shared_ptr<Ledger const> const& ledger)
return;
}
JLOG(m_journal.info()) << "Advancing accepted ledger to " << ledger->header().seq << " with >= " << minVal
<< " validations";
JLOG(m_journal.info()) << "Advancing accepted ledger to " << ledger->header().seq << " ("
<< to_short_string(ledger->header().hash) << ") with >= " << minVal << " validations";
ledger->setValidated();
ledger->setFull();

View File

@@ -13,7 +13,8 @@ TimeoutCounter::TimeoutCounter(
QueueJobParameter&& jobParameter,
beast::Journal journal)
: app_(app)
, journal_(journal)
, sink_(journal, to_short_string(hash) + " ")
, journal_(sink_)
, hash_(hash)
, timeouts_(0)
, complete_(false)
@@ -33,6 +34,7 @@ TimeoutCounter::setTimer(ScopedLockType& sl)
{
if (isDone())
return;
JLOG(journal_.debug()) << "Setting timer for " << timerInterval_.count() << "ms";
timer_.expires_after(timerInterval_);
timer_.async_wait([wptr = pmDowncast()](boost::system::error_code const& ec) {
if (ec == boost::asio::error::operation_aborted)
@@ -40,6 +42,9 @@ TimeoutCounter::setTimer(ScopedLockType& sl)
if (auto ptr = wptr.lock())
{
JLOG(ptr->journal_.debug()) << "timer: ec: " << ec
<< " (operation_aborted: " << boost::asio::error::operation_aborted << " - "
<< (ec == boost::asio::error::operation_aborted ? "aborted" : "other") << ")";
ScopedLockType sl(ptr->mtx_);
ptr->queueJob(sl);
}
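
The net effect of the sink_ change is that every message logged through journal_ is now prefixed with the short hash of the ledger being acquired. A small illustration, following the constructor usage shown above (the hash string is an example):

beast::WrappedSink sink(journal, to_short_string(hash) + " ");
beast::Journal prefixed(sink);
// Messages now read e.g. "ab12cd34 Setting timer for 1000ms", which makes it
// easier to follow one acquisition through interleaved log output.
JLOG(prefixed.debug()) << "Setting timer for 1000ms";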

View File

@@ -4,6 +4,7 @@
#include <xrpld/app/main/Application.h>
#include <xrpl/beast/utility/Journal.h>
#include <xrpl/beast/utility/WrappedSink.h>
#include <xrpl/core/Job.h>
#include <boost/asio/basic_waitable_timer.hpp>
@@ -104,6 +105,7 @@ protected:
// Used in this class for access to boost::asio::io_context and
// xrpl::Overlay. Used in subtypes for the kitchen sink.
Application& app_;
beast::WrappedSink sink_;
beast::Journal journal_;
mutable std::recursive_mutex mtx_;

View File

@@ -6,6 +6,7 @@
#include <xrpl/basics/TaggedCache.h>
#include <xrpl/beast/utility/PropertyStream.h>
#include <xrpl/core/ServiceRegistry.h>
#include <xrpl/protocol/Protocol.h>
#include <xrpl/shamap/TreeNodeCache.h>
@@ -91,7 +92,7 @@ class Validations;
class RCLValidationsAdaptor;
using RCLValidations = Validations<RCLValidationsAdaptor>;
class Application : public beast::PropertyStream::Source
class Application : public ServiceRegistry, public beast::PropertyStream::Source
{
public:
/* VFALCO NOTE
@@ -146,92 +147,12 @@ public:
virtual boost::asio::io_context&
getIOContext() = 0;
virtual CollectorManager&
getCollectorManager() = 0;
virtual Family&
getNodeFamily() = 0;
virtual TimeKeeper&
timeKeeper() = 0;
virtual JobQueue&
getJobQueue() = 0;
virtual NodeCache&
getTempNodeCache() = 0;
virtual CachedSLEs&
cachedSLEs() = 0;
virtual AmendmentTable&
getAmendmentTable() = 0;
virtual HashRouter&
getHashRouter() = 0;
virtual LoadFeeTrack&
getFeeTrack() = 0;
virtual LoadManager&
getLoadManager() = 0;
virtual Overlay&
overlay() = 0;
virtual TxQ&
getTxQ() = 0;
virtual ValidatorList&
validators() = 0;
virtual ValidatorSite&
validatorSites() = 0;
virtual ManifestCache&
validatorManifests() = 0;
virtual ManifestCache&
publisherManifests() = 0;
virtual Cluster&
cluster() = 0;
virtual PeerReservationTable&
peerReservations() = 0;
virtual RCLValidations&
getValidations() = 0;
virtual NodeStore::Database&
getNodeStore() = 0;
virtual InboundLedgers&
getInboundLedgers() = 0;
virtual InboundTransactions&
getInboundTransactions() = 0;
virtual TaggedCache<uint256, AcceptedLedger>&
getAcceptedLedgerCache() = 0;
virtual LedgerMaster&
getLedgerMaster() = 0;
virtual LedgerCleaner&
getLedgerCleaner() = 0;
virtual LedgerReplayer&
getLedgerReplayer() = 0;
virtual NetworkOPs&
getOPs() = 0;
virtual OrderBookDB&
getOrderBookDB() = 0;
virtual ServerHandler&
getServerHandler() = 0;
virtual TransactionMaster&
getMasterTransaction() = 0;
virtual perf::PerfLog&
getPerfLog() = 0;
virtual std::pair<PublicKey, SecretKey> const&
nodeIdentity() = 0;
virtual std::optional<PublicKey const>
getValidationPublicKey() const = 0;
virtual Resource::Manager&
getResourceManager() = 0;
virtual PathRequests&
getPathRequests() = 0;
virtual SHAMapStore&
getSHAMapStore() = 0;
virtual PendingSaves&
pendingSaves() = 0;
virtual OpenLedger&
openLedger() = 0;
virtual OpenLedger const&
openLedger() const = 0;
virtual RelationalDatabase&
getRelationalDatabase() = 0;
virtual std::chrono::milliseconds
getIOLatency() = 0;
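
The accessors removed from Application above are the kind of thing the new ServiceRegistry base is meant to host. A hedged sketch of the direction of the refactor; the actual ServiceRegistry in xrpl/core/ServiceRegistry.h may declare a different set of accessors:

class ServiceRegistry
{
public:
    virtual ~ServiceRegistry() = default;

    // Service accessors formerly declared directly on Application, so that
    // modules can depend on the registry rather than the whole Application.
    virtual JobQueue& getJobQueue() = 0;
    virtual InboundLedgers& getInboundLedgers() = 0;
    virtual LedgerMaster& getLedgerMaster() = 0;
    virtual NetworkOPs& getOPs() = 0;
    // ... and the other services listed above.
};

// Application then inherits the registry and keeps only application-level
// concerns (setup, configuration, lifetime).
class Application : public ServiceRegistry, public beast::PropertyStream::Source
{
    // ...
};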

View File

@@ -200,7 +200,7 @@ public:
/** Add a suppression peer and get message's relay status.
* Return pair:
* element 1: true if the peer is added.
* element 1: true if the key is added.
* element 2: optional is seated to the relay time point or
* is unseated if it has not relayed yet. */
std::pair<bool, std::optional<Stopwatch::time_point>>
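
Illustrative consumption of the documented pair; addSuppressionPeerWithStatus is used here as the assumed name of the member this comment documents:

auto const [keyAdded, relayTime] = hashRouter.addSuppressionPeerWithStatus(key, peerShortId);
if (keyAdded && !relayTime)
{
    // First suppression entry for this key, and it has not been relayed yet.
}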

View File

@@ -33,10 +33,10 @@
#include <xrpld/rpc/MPTokenIssuanceID.h>
#include <xrpld/rpc/ServerHandler.h>
#include <xrpl/basics/CanProcess.h>
#include <xrpl/basics/UptimeClock.h>
#include <xrpl/basics/mulDiv.h>
#include <xrpl/basics/safe_cast.h>
#include <xrpl/basics/scope.h>
#include <xrpl/beast/utility/rngfill.h>
#include <xrpl/core/PerfLog.h>
#include <xrpl/crypto/RFC1751.h>
@@ -378,7 +378,7 @@ public:
isFull() override;
void
setMode(OperatingMode om) override;
setMode(OperatingMode om, char const* reason) override;
bool
isBlocked() override;
@@ -809,7 +809,7 @@ NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
inline void
NetworkOPsImp::setStandAlone()
{
setMode(OperatingMode::FULL);
setMode(OperatingMode::FULL, "setStandAlone");
}
inline void
@@ -945,7 +945,7 @@ NetworkOPsImp::processHeartbeatTimer()
{
if (mMode != OperatingMode::DISCONNECTED)
{
setMode(OperatingMode::DISCONNECTED);
setMode(OperatingMode::DISCONNECTED, "Heartbeat: insufficient peers");
std::stringstream ss;
ss << "Node count (" << numPeers << ") has fallen "
<< "below required minimum (" << minPeerCount_ << ").";
@@ -969,7 +969,7 @@ NetworkOPsImp::processHeartbeatTimer()
if (mMode == OperatingMode::DISCONNECTED)
{
setMode(OperatingMode::CONNECTED);
setMode(OperatingMode::CONNECTED, "Heartbeat: sufficient peers");
JLOG(m_journal.info()) << "Node count (" << numPeers << ") is sufficient.";
CLOG(clog.ss()) << "setting mode to CONNECTED based on " << numPeers << " peers. ";
}
@@ -979,9 +979,9 @@ NetworkOPsImp::processHeartbeatTimer()
auto origMode = mMode.load();
CLOG(clog.ss()) << "mode: " << strOperatingMode(origMode, true);
if (mMode == OperatingMode::SYNCING)
setMode(OperatingMode::SYNCING);
setMode(OperatingMode::SYNCING, "Heartbeat: check syncing");
else if (mMode == OperatingMode::CONNECTED)
setMode(OperatingMode::CONNECTED);
setMode(OperatingMode::CONNECTED, "Heartbeat: check connected");
auto newMode = mMode.load();
if (origMode != newMode)
{
@@ -1650,7 +1650,7 @@ void
NetworkOPsImp::setAmendmentBlocked()
{
amendmentBlocked_ = true;
setMode(OperatingMode::CONNECTED);
setMode(OperatingMode::CONNECTED, "setAmendmentBlocked");
}
inline bool
@@ -1681,7 +1681,7 @@ void
NetworkOPsImp::setUNLBlocked()
{
unlBlocked_ = true;
setMode(OperatingMode::CONNECTED);
setMode(OperatingMode::CONNECTED, "setUNLBlocked");
}
inline void
@@ -1776,7 +1776,7 @@ NetworkOPsImp::checkLastClosedLedger(Overlay::PeerSequence const& peerList, uint
if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
{
setMode(OperatingMode::CONNECTED);
setMode(OperatingMode::CONNECTED, "check LCL: not on consensus ledger");
}
if (consensus)
@@ -1856,8 +1856,8 @@ NetworkOPsImp::beginConsensus(uint256 const& networkClosed, std::unique_ptr<std:
// this shouldn't happen unless we jump ledgers
if (mMode == OperatingMode::FULL)
{
JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
setMode(OperatingMode::TRACKING);
JLOG(m_journal.warn()) << "beginConsensus Don't have LCL, going to tracking";
setMode(OperatingMode::TRACKING, "beginConsensus: No LCL");
CLOG(clog) << "beginConsensus Don't have LCL, going to tracking. ";
}
@@ -1981,7 +1981,7 @@ NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
// validations we have for LCL. If the ledger is good enough, go to
// TRACKING - TODO
if (!needNetworkLedger_)
setMode(OperatingMode::TRACKING);
setMode(OperatingMode::TRACKING, "endConsensus: check tracking");
}
if (((mMode == OperatingMode::CONNECTED) || (mMode == OperatingMode::TRACKING)) && !ledgerChange)
@@ -1992,7 +1992,7 @@ NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
auto current = m_ledgerMaster.getCurrentLedger();
if (app_.timeKeeper().now() < (current->header().parentCloseTime + 2 * current->header().closeTimeResolution))
{
setMode(OperatingMode::FULL);
setMode(OperatingMode::FULL, "endConsensus: check full");
}
}
@@ -2004,7 +2004,7 @@ NetworkOPsImp::consensusViewChange()
{
if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
{
setMode(OperatingMode::CONNECTED);
setMode(OperatingMode::CONNECTED, "consensusViewChange");
}
}
@@ -2302,7 +2302,7 @@ NetworkOPsImp::pubPeerStatus(std::function<Json::Value(void)> const& func)
}
void
NetworkOPsImp::setMode(OperatingMode om)
NetworkOPsImp::setMode(OperatingMode om, char const* reason)
{
using namespace std::chrono_literals;
if (om == OperatingMode::CONNECTED)
@@ -2322,11 +2322,12 @@ NetworkOPsImp::setMode(OperatingMode om)
if (mMode == om)
return;
auto const sink = om < mMode ? m_journal.warn() : m_journal.info();
mMode = om;
accounting_.mode(om);
JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
JLOG(sink) << "STATE->" << strOperatingMode() << " - " << reason;
pubServer();
}
@@ -2335,31 +2336,23 @@ NetworkOPsImp::recvValidation(std::shared_ptr<STValidation> const& val, std::str
{
JLOG(m_journal.trace()) << "recvValidation " << val->getLedgerHash() << " from " << source;
std::unique_lock lock(validationsMutex_);
BypassAccept bypassAccept = BypassAccept::no;
try
{
if (pendingValidations_.contains(val->getLedgerHash()))
bypassAccept = BypassAccept::yes;
else
pendingValidations_.insert(val->getLedgerHash());
scope_unlock unlock(lock);
handleNewValidation(app_, val, source, bypassAccept, m_journal);
CanProcess const check(validationsMutex_, pendingValidations_, val->getLedgerHash());
try
{
BypassAccept bypassAccept = check ? BypassAccept::no : BypassAccept::yes;
handleNewValidation(app_, val, source, bypassAccept, m_journal);
}
catch (std::exception const& e)
{
JLOG(m_journal.warn()) << "Exception thrown for handling new validation " << val->getLedgerHash() << ": "
<< e.what();
}
catch (...)
{
JLOG(m_journal.warn()) << "Unknown exception thrown for handling new validation " << val->getLedgerHash();
}
}
catch (std::exception const& e)
{
JLOG(m_journal.warn()) << "Exception thrown for handling new validation " << val->getLedgerHash() << ": "
<< e.what();
}
catch (...)
{
JLOG(m_journal.warn()) << "Unknown exception thrown for handling new validation " << val->getLedgerHash();
}
if (bypassAccept == BypassAccept::no)
{
pendingValidations_.erase(val->getLedgerHash());
}
lock.unlock();
pubValidation(val);

View File

@@ -180,7 +180,7 @@ public:
virtual bool
isFull() = 0;
virtual void
setMode(OperatingMode om) = 0;
setMode(OperatingMode om, char const* reason) = 0;
virtual bool
isBlocked() = 0;
virtual bool

View File

@@ -53,7 +53,17 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx)
return {nullptr, tecOBJECT_NOT_FOUND};
if (hasExpired(ctx.view, (*offerSLE)[~sfExpiration]))
return {nullptr, tecEXPIRED};
{
// Before fixExpiredNFTokenOfferRemoval amendment, expired
// offers caused tecEXPIRED in preclaim, leaving them on ledger
// forever. After the amendment, we allow expired offers to
// reach doApply() where they get deleted and tecEXPIRED is
// returned.
if (!ctx.view.rules().enabled(fixExpiredNFTokenOfferRemoval))
return {nullptr, tecEXPIRED};
// Amendment enabled: return the expired offer to be handled in
// doApply
}
if ((*offerSLE)[sfAmount].negative())
return {nullptr, temBAD_OFFER};
@@ -299,7 +309,7 @@ NFTokenAcceptOffer::pay(AccountID const& from, AccountID const& to, STAmount con
{
// This should never happen, but it's easy and quick to check.
if (amount < beast::zero)
return tecINTERNAL;
return tecINTERNAL; // LCOV_EXCL_LINE
auto const result = accountSend(view(), from, to, amount, j_);
@@ -410,6 +420,39 @@ NFTokenAcceptOffer::doApply()
auto bo = loadToken(ctx_.tx[~sfNFTokenBuyOffer]);
auto so = loadToken(ctx_.tx[~sfNFTokenSellOffer]);
// With fixExpiredNFTokenOfferRemoval amendment, check for expired offers
// and delete them, returning tecEXPIRED. This ensures expired offers
// are properly cleaned up from the ledger.
if (view().rules().enabled(fixExpiredNFTokenOfferRemoval))
{
bool foundExpired = false;
auto const deleteOfferIfExpired = [this, &foundExpired](std::shared_ptr<SLE> const& offer) -> TER {
if (offer && hasExpired(view(), (*offer)[~sfExpiration]))
{
JLOG(j_.trace()) << "Offer is expired, deleting: " << offer->key();
if (!nft::deleteTokenOffer(view(), offer))
{
// LCOV_EXCL_START
JLOG(j_.fatal()) << "Unable to delete expired offer '" << offer->key() << "': ignoring";
return tecINTERNAL;
// LCOV_EXCL_STOP
}
JLOG(j_.trace()) << "Deleted offer " << offer->key();
foundExpired = true;
}
return tesSUCCESS;
};
if (auto const r = deleteOfferIfExpired(bo); !isTesSuccess(r))
return r;
if (auto const r = deleteOfferIfExpired(so); !isTesSuccess(r))
return r;
if (foundExpired)
return tecEXPIRED;
}
if (bo && !nft::deleteTokenOffer(view(), bo))
{
// LCOV_EXCL_START

View File

@@ -630,12 +630,12 @@ OverlayImpl::getOverlayInfo()
{
using namespace std::chrono;
Json::Value jv;
auto& av = jv[jss::active] = Json::Value(Json::arrayValue);
auto& av = jv["active"] = Json::Value(Json::arrayValue);
for_each([&](std::shared_ptr<PeerImp>&& sp) {
auto& pv = av.append(Json::Value(Json::objectValue));
pv[jss::public_key] = base64_encode(sp->getNodePublic().data(), sp->getNodePublic().size());
pv[jss::type] = sp->slot()->inbound() ? jss::in : jss::out;
pv[jss::type] = sp->slot()->inbound() ? "in" : "out";
pv[jss::uptime] = static_cast<std::uint32_t>(duration_cast<seconds>(sp->uptime()).count());
if (sp->crawl())
{
@@ -647,7 +647,7 @@ OverlayImpl::getOverlayInfo()
}
else
{
pv[jss::port] = sp->getRemoteAddress().port();
pv[jss::port] = std::to_string(sp->getRemoteAddress().port());
}
}