Compare commits

...

11 Commits

Author SHA1 Message Date
Ed Hennis
3f93edc5e0 Merge branch 'develop' into ximinez/fix/validator-cache 2025-11-25 14:55:02 -05:00
Jingchen
b124c9f7e3 refactor: Retire Flow and FlowSortStrands amendments (#6054)
Amendments that have been active for more than two years can be retired. This change retires the Flow and FlowSortStrands amendments.
2025-11-25 14:21:17 +00:00
Ed Hennis
baf62689ff Merge branch 'develop' into ximinez/fix/validator-cache 2025-11-24 21:49:07 -05:00
Bart
b550dc00ed ci: Remove missing commits check (#6077)
This change removes the CI check for missing commits, as well as a stray path to the publish-docs workflow that isn't used in the on-trigger workflow.
2025-11-24 21:43:39 -05:00
Ed Hennis
ddf7d6cac4 Merge branch 'develop' into ximinez/fix/validator-cache 2025-11-24 21:30:18 -05:00
Ed Hennis
fcd2ea2d6e Merge branch 'develop' into ximinez/fix/validator-cache 2025-11-21 12:47:54 -05:00
Ed Hennis
a16aa5b12f Merge branch 'develop' into ximinez/fix/validator-cache 2025-11-18 22:39:25 -05:00
Ed Hennis
ef2de81870 Merge branch 'develop' into ximinez/fix/validator-cache 2025-11-15 03:08:38 -05:00
Ed Hennis
fce6757260 Merge branch 'develop' into ximinez/fix/validator-cache 2025-11-13 12:19:10 -05:00
Ed Hennis
d759a0a2b0 Merge branch 'develop' into ximinez/fix/validator-cache 2025-11-12 14:12:51 -05:00
Ed Hennis
d2dda416e8 Use Validator List (VL) cache files in more scenarios
- If any [validator_list_keys] are still unavailable after all
  [validator_list_sites] have had a chance to be queried, fall back to
  loading cache files. Currently, cache files are only used if no sites
  are defined, or if a request to one of them fails with an error; that
  does not cover cases where too few sites are defined, or where a site
  returns an invalid VL (or something else entirely).
- Resolves #5320
2025-11-10 19:53:02 -05:00
11 changed files with 68 additions and 171 deletions
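
Commit d2dda416e8 above (the subject of this compare) broadens when cached validator lists are used. The sketch below is a rough, self-contained model of the rule it describes, written with plain standard-library types rather than rippled's real ValidatorSite class and with made-up site URLs: once every configured site has been tried at least once, any publisher lists that are still missing are loaded from cache files. The ValidatorSite.cpp hunk near the end of this diff adds the same check to setTimer().

    // Toy model of the cache fallback; not the actual rippled implementation.
    #include <algorithm>
    #include <cstdio>
    #include <optional>
    #include <string>
    #include <vector>

    struct Site
    {
        std::string uri;
        // Set once the site has been handled at least once, including errors
        // and timeouts (mirrors Site::lastRefreshStatus in ValidatorSite).
        std::optional<std::string> lastRefreshStatus;
    };

    // Stand-in for ValidatorSite::missingSite(), which falls back to loading
    // cache files for any [validator_list_keys] that still have no usable list.
    void missingSite()
    {
        std::puts("loading validator list cache files");
    }

    void onTimer(std::vector<Site> const& sites)
    {
        if (!sites.empty() &&
            std::ranges::all_of(sites, [](Site const& site) {
                return site.lastRefreshStatus.has_value();
            }))
        {
            missingSite();
        }
    }

    int main()
    {
        // Hypothetical [validator_list_sites] entries; both have been queried,
        // one timed out and one returned an invalid VL, so the fallback runs.
        std::vector<Site> sites{
            {"https://vl.example.net/one.json", "timeout"},
            {"https://vl.example.net/two.json", "invalid"}};
        onTimer(sites);
    }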

View File

@@ -14,9 +14,7 @@ on:
- "master"
paths:
# These paths are unique to `on-trigger.yml`.
- ".github/workflows/reusable-check-missing-commits.yml"
- ".github/workflows/on-trigger.yml"
- ".github/workflows/publish-docs.yml"
# Keep the paths below in sync with those in `on-pr.yml`.
- ".github/actions/build-deps/**"
@@ -63,10 +61,6 @@ defaults:
shell: bash
jobs:
check-missing-commits:
if: ${{ github.event_name == 'push' && github.ref_type == 'branch' && contains(fromJSON('["develop", "release"]'), github.ref_name) }}
uses: ./.github/workflows/reusable-check-missing-commits.yml
build-test:
uses: ./.github/workflows/reusable-build-test.yml
strategy:

View File

@@ -1,62 +0,0 @@
# This workflow checks that all commits in the "master" branch are also in the
# "release" and "develop" branches, and that all commits in the "release" branch
# are also in the "develop" branch.
name: Check for missing commits
# This workflow can only be triggered by other workflows.
on: workflow_call
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-missing-commits
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
check:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
with:
fetch-depth: 0
- name: Check for missing commits
env:
MESSAGE: |
If you are reading this, then the commits indicated above are missing
from the "develop" and/or "release" branch. Do a reverse-merge as soon
as possible. See CONTRIBUTING.md for instructions.
run: |
set -o pipefail
# Branches are ordered by how "canonical" they are. Every commit in one
# branch should be in all the branches behind it.
order=(master release develop)
branches=()
for branch in "${order[@]}"; do
# Check that the branches exist so that this job will work on forked
# repos, which don't necessarily have master and release branches.
echo "Checking if ${branch} exists."
if git ls-remote --exit-code --heads origin \
refs/heads/${branch} > /dev/null; then
branches+=(origin/${branch})
fi
done
prior=()
for branch in "${branches[@]}"; do
if [[ ${#prior[@]} -ne 0 ]]; then
echo "Checking ${prior[@]} for commits missing from ${branch}."
git log --oneline --no-merges "${prior[@]}" \
^$branch | tee -a "missing-commits.txt"
echo
fi
prior+=("${branch}")
done
if [[ $(cat missing-commits.txt | wc -l) -ne 0 ]]; then
echo "${MESSAGE}"
exit 1
fi

View File

@@ -64,8 +64,6 @@ XRPL_FEATURE(Clawback, Supported::yes, VoteBehavior::DefaultNo
XRPL_FIX (UniversalNumber, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(XRPFees, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (RemoveNFTokenAutoTrustLine, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(FlowSortStrands, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FEATURE(Flow, Supported::yes, VoteBehavior::DefaultYes)
// The following amendments are obsolete, but must remain supported
// because they could potentially get enabled.
@@ -124,7 +122,9 @@ XRPL_RETIRE_FEATURE(Escrow)
XRPL_RETIRE_FEATURE(EnforceInvariants)
XRPL_RETIRE_FEATURE(ExpandedSignerList)
XRPL_RETIRE_FEATURE(FeeEscalation)
XRPL_RETIRE_FEATURE(Flow)
XRPL_RETIRE_FEATURE(FlowCross)
XRPL_RETIRE_FEATURE(FlowSortStrands)
XRPL_RETIRE_FEATURE(HardenedValidations)
XRPL_RETIRE_FEATURE(ImmediateOfferKilled)
XRPL_RETIRE_FEATURE(MultiSign)
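
For reference, retiring an amendment, as commit b124c9f7e3 does for Flow and FlowSortStrands in the hunk above, means its behavior is treated as unconditionally active and the runtime gates on the flag are deleted; the RippleCalc.cpp and flow.h hunks further down show exactly that. The snippet below is only an illustrative sketch of the pattern, with a hypothetical helper standing in for the gated logic.

    // Illustrative only; not rippled's real code.
    #include <cstdio>

    // Hypothetical helper standing in for the logic that used to be gated.
    void sortStrandsByQuality()
    {
        std::puts("strands sorted by theoretical quality, best first");
    }

    int main()
    {
        // Before retirement (paraphrasing the removed flow.h check):
        //     if (view.rules().enabled(featureFlowSortStrands) && !next_.empty())
        //         sort the strands;
        // After retirement the amendment check is simply dropped and the
        // behavior always runs:
        sortStrandsByQuality();
    }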

View File

@@ -267,9 +267,8 @@ public:
// strand dry until the liquidity is actually used)
// The implementation allows any single step to consume at most 1000
// offers. With the `FlowSortStrands` feature enabled, if the total
// number of offers consumed by all the steps combined exceeds 1500, the
// payment stops.
// offers. If the total number of offers consumed by all the steps
// combined exceeds 1500, the payment stops.
{
Env env(*this, features);
@@ -457,16 +456,12 @@ public:
// below the limit. However, if all the offers are consumed it would
// create a tecOVERSIZE error.
// The featureFlowSortStrands introduces a way of tracking the total
// number of consumed offers; with this feature the transaction no
// longer fails with a tecOVERSIZE error.
// The implementation allows any single step to consume at most 1000
// offers. With the `FlowSortStrands` feature enabled, if the total
// number of offers consumed by all the steps combined exceeds 1500, the
// payment stops. Since the first set of offers consumes 998 offers, the
// second set will consume 998, which is not over the limit and the
// payment stops. So 2*998, or 1996 is the expected value when
// `FlowSortStrands` is enabled.
// offers. If the total number of offers consumed by all the steps
// combined exceeds 1500, the payment stops. Since the first set of
// offers consumes 998 offers, the second set will consume 998, which is
// not over the limit and the payment stops. So 2*998, or 1996 is the
// expected value.
n_offers(env, 998, alice, XRP(1.00), USD(1));
n_offers(env, 998, alice, XRP(0.99), USD(1));
n_offers(env, 998, alice, XRP(0.98), USD(1));
@@ -474,24 +469,10 @@ public:
n_offers(env, 998, alice, XRP(0.96), USD(1));
n_offers(env, 998, alice, XRP(0.95), USD(1));
bool const withSortStrands = features[featureFlowSortStrands];
auto const expectedTER = [&]() -> TER {
if (!withSortStrands)
return TER{tecOVERSIZE};
return tesSUCCESS;
}();
env(offer(bob, USD(8000), XRP(8000)), ter(expectedTER));
env(offer(bob, USD(8000), XRP(8000)), ter(tesSUCCESS));
env.close();
auto const expectedUSD = [&] {
if (!withSortStrands)
return USD(0);
return USD(1996);
}();
env.require(balance(bob, expectedUSD));
env.require(balance(bob, USD(1996)));
}
void
@@ -507,9 +488,7 @@ public:
using namespace jtx;
auto const sa = testable_amendments();
testAll(sa);
testAll(sa - featureFlowSortStrands);
testAll(sa - featurePermissionedDEX);
testAll(sa - featureFlowSortStrands - featurePermissionedDEX);
}
};
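
The comments in the hunk above rely on a bit of arithmetic: each step may consume at most 1000 offers, the payment stops once the combined total exceeds 1500, and each book holds 998 offers selling 1 USD, so exactly two books are consumed and 1996 USD is delivered. Below is a small self-contained model of that cutoff (plain C++, not the payment engine, with the check simplified to run between strands).

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main()
    {
        constexpr int maxOffersPerStep = 1000;     // per-step cap
        constexpr int maxOffersToConsider = 1500;  // combined cap across all steps

        // Six books of 998 offers each, 1 USD per offer, as in the test above.
        std::vector<int> books(6, 998);

        int consumed = 0;
        int deliveredUSD = 0;
        for (int offers : books)
        {
            if (consumed >= maxOffersToConsider)
                break;  // combined limit already exceeded: the payment stops
            int const take = std::min(offers, maxOffersPerStep);
            consumed += take;
            deliveredUSD += take;
        }
        // 998 + 998 = 1996: the limit is only detected after the second book,
        // matching the test's expected balance of USD(1996).
        std::printf("offers consumed: %d, USD delivered: %d\n", consumed, deliveredUSD);
    }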

View File

@@ -1054,7 +1054,7 @@ public:
// Charlie - queue a transaction, with a higher fee
// than default
env(noop(charlie), fee(15), queued);
checkMetrics(*this, env, 6, initQueueMax, 4, 3);
checkMetrics(*this, env, 6, initQueueMax, 4, 3, 257);
BEAST_EXPECT(env.seq(alice) == aliceSeq);
BEAST_EXPECT(env.seq(bob) == bobSeq);
@@ -4330,7 +4330,7 @@ public:
Account const ellie("ellie");
Account const fiona("fiona");
constexpr int ledgersInQueue = 20;
constexpr int ledgersInQueue = 30;
auto cfg = makeConfig(
{{"minimum_txn_in_ledger_standalone", "1"},
{"ledgers_in_queue", std::to_string(ledgersInQueue)},

View File

@@ -798,16 +798,18 @@ public:
{
// a Env FeatureBitset has *only* those features
Env env{*this, FeatureBitset{featureDynamicMPT | featureFlow}};
Env env{
*this, FeatureBitset{featureDynamicMPT | featureTokenEscrow}};
BEAST_EXPECT(env.app().config().features.size() == 2);
foreachFeature(supported, [&](uint256 const& f) {
bool const has = (f == featureDynamicMPT || f == featureFlow);
bool const has =
(f == featureDynamicMPT || f == featureTokenEscrow);
this->BEAST_EXPECT(has == hasFeature(env, f));
});
}
auto const missingSomeFeatures =
testable_amendments() - featureDynamicMPT - featureFlow;
testable_amendments() - featureDynamicMPT - featureTokenEscrow;
BEAST_EXPECT(missingSomeFeatures.count() == (supported.count() - 2));
{
// a Env supported_features_except is missing *only* those features
@@ -815,7 +817,8 @@ public:
BEAST_EXPECT(
env.app().config().features.size() == (supported.count() - 2));
foreachFeature(supported, [&](uint256 const& f) {
bool hasnot = (f == featureDynamicMPT || f == featureFlow);
bool hasnot =
(f == featureDynamicMPT || f == featureTokenEscrow);
this->BEAST_EXPECT(hasnot != hasFeature(env, f));
});
}
@@ -828,7 +831,9 @@ public:
Env env{
*this,
FeatureBitset{
featureDynamicMPT, featureFlow, *neverSupportedFeat}};
featureDynamicMPT,
featureTokenEscrow,
*neverSupportedFeat}};
// this app will have just 2 supported amendments and
// one additional never supported feature flag
@@ -836,7 +841,7 @@ public:
BEAST_EXPECT(hasFeature(env, *neverSupportedFeat));
foreachFeature(supported, [&](uint256 const& f) {
bool has = (f == featureDynamicMPT || f == featureFlow);
bool has = (f == featureDynamicMPT || f == featureTokenEscrow);
this->BEAST_EXPECT(has == hasFeature(env, f));
});
}
@@ -856,7 +861,8 @@ public:
(supported.count() - 2 + 1));
BEAST_EXPECT(hasFeature(env, *neverSupportedFeat));
foreachFeature(supported, [&](uint256 const& f) {
bool hasnot = (f == featureDynamicMPT || f == featureFlow);
bool hasnot =
(f == featureDynamicMPT || f == featureTokenEscrow);
this->BEAST_EXPECT(hasnot != hasFeature(env, f));
});
}
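
The test above leans on simple set arithmetic over amendments: an Env constructed from an explicit FeatureBitset enables only the listed features, and subtracting two features from the full supported set leaves supported.count() - 2. Here is a toy model of that arithmetic with a plain std::bitset (stand-in indices, not real feature IDs).

    #include <bitset>
    #include <cassert>

    int main()
    {
        constexpr std::size_t numAmendments = 96;  // arbitrary size for the model

        std::bitset<numAmendments> supported;
        supported.set();  // pretend every amendment is supported

        // FeatureBitset{featureDynamicMPT | featureTokenEscrow}: only these two.
        std::bitset<numAmendments> only;
        only.set(3);  // stand-in for featureDynamicMPT
        only.set(7);  // stand-in for featureTokenEscrow
        assert(only.count() == 2);

        // testable_amendments() - featureDynamicMPT - featureTokenEscrow
        auto const missingSome = supported & ~only;
        assert(missingSome.count() == supported.count() - 2);
    }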

View File

@@ -1117,7 +1117,7 @@ class GetAmendments_test : public beast::unit_test::suite
// There should be at least 3 amendments. Don't do exact comparison
// to avoid maintenance as more amendments are added in the future.
BEAST_EXPECT(i == 254);
BEAST_EXPECT(majorities.size() >= 3);
BEAST_EXPECT(majorities.size() >= 2);
// None of the amendments should be enabled yet.
auto enableds = getEnabledAmendments(*env.closed());
@@ -1135,7 +1135,7 @@ class GetAmendments_test : public beast::unit_test::suite
break;
}
BEAST_EXPECT(i == 255);
BEAST_EXPECT(enableds.size() >= 3);
BEAST_EXPECT(enableds.size() >= 2);
}
void

View File

@@ -123,7 +123,7 @@ class Feature_test : public beast::unit_test::suite
BEAST_EXPECT(
featureToName(fixRemoveNFTokenAutoTrustLine) ==
"fixRemoveNFTokenAutoTrustLine");
BEAST_EXPECT(featureToName(featureFlow) == "Flow");
BEAST_EXPECT(featureToName(featureBatch) == "Batch");
BEAST_EXPECT(featureToName(featureDID) == "DID");
BEAST_EXPECT(
featureToName(fixIncludeKeyletFields) == "fixIncludeKeyletFields");
@@ -183,16 +183,16 @@ class Feature_test : public beast::unit_test::suite
using namespace test::jtx;
Env env{*this};
auto jrr = env.rpc("feature", "Flow")[jss::result];
auto jrr = env.rpc("feature", "fixAMMOverflowOffer")[jss::result];
BEAST_EXPECTS(jrr[jss::status] == jss::success, "status");
jrr.removeMember(jss::status);
BEAST_EXPECT(jrr.size() == 1);
BEAST_EXPECT(
jrr.isMember("740352F2412A9909880C23A559FCECEDA3BE2126FED62FC7660D6"
"28A06927F11"));
jrr.isMember("12523DF04B553A0B1AD74F42DDB741DE8DC06A03FC089A0EF197E"
"2A87F1D8107"));
auto feature = *(jrr.begin());
BEAST_EXPECTS(feature[jss::name] == "Flow", "name");
BEAST_EXPECTS(feature[jss::name] == "fixAMMOverflowOffer", "name");
BEAST_EXPECTS(!feature[jss::enabled].asBool(), "enabled");
BEAST_EXPECTS(
feature[jss::vetoed].isBool() && !feature[jss::vetoed].asBool(),
@@ -200,7 +200,7 @@ class Feature_test : public beast::unit_test::suite
BEAST_EXPECTS(feature[jss::supported].asBool(), "supported");
// feature names are case-sensitive - expect error here
jrr = env.rpc("feature", "flow")[jss::result];
jrr = env.rpc("feature", "fMM")[jss::result];
BEAST_EXPECT(jrr[jss::error] == "badFeature");
BEAST_EXPECT(jrr[jss::error_message] == "Feature unknown or invalid.");
}
@@ -419,9 +419,9 @@ class Feature_test : public beast::unit_test::suite
break;
}
// There should be at least 3 amendments. Don't do exact comparison
// There should be at least 2 amendments. Don't do exact comparison
// to avoid maintenance as more amendments are added in the future.
BEAST_EXPECT(majorities.size() >= 3);
BEAST_EXPECT(majorities.size() >= 2);
std::map<std::string, VoteBehavior> const& votes =
ripple::detail::supportedAmendments();
@@ -476,8 +476,8 @@ class Feature_test : public beast::unit_test::suite
testcase("Veto");
using namespace test::jtx;
Env env{*this, FeatureBitset{featureFlow}};
constexpr char const* featureName = "Flow";
Env env{*this, FeatureBitset{featurePriceOracle}};
constexpr char const* featureName = "fixAMMOverflowOffer";
auto jrr = env.rpc("feature", featureName)[jss::result];
if (!BEAST_EXPECTS(jrr[jss::status] == jss::success, "status"))
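
The hunk above switches the RPC test from the retired Flow amendment to fixAMMOverflowOffer. The behavior it exercises: the feature command looks an amendment up by its exact, case-sensitive name (or by hash), returns a single entry keyed by the amendment hash with name/enabled/vetoed/supported fields, and reports badFeature for anything unknown. Below is a toy model of just the lookup and error path (a plain map, not the rippled RPC handler; the hash is the one quoted in the test).

    #include <cstdio>
    #include <map>
    #include <string>

    int main()
    {
        // Amendment name -> hash, as reported by the "feature" RPC.
        std::map<std::string, std::string> const features{
            {"fixAMMOverflowOffer",
             "12523DF04B553A0B1AD74F42DDB741DE8DC06A03FC089A0EF197E2A87F1D8107"}};

        for (std::string const name : {"fixAMMOverflowOffer", "fMM"})
        {
            auto const it = features.find(name);
            if (it != features.end())
                std::printf("%s -> %s\n", name.c_str(), it->second.c_str());
            else
                std::printf(
                    "%s -> badFeature: Feature unknown or invalid.\n",
                    name.c_str());
        }
    }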

View File

@@ -129,6 +129,11 @@ ValidatorSite::load(
{
try
{
// This is not super efficient, but it doesn't happen often.
bool found = std::ranges::any_of(sites_, [&uri](auto const& site) {
return site.loadedResource->uri == uri;
});
if (!found)
sites_.emplace_back(uri);
}
catch (std::exception const& e)
@@ -191,6 +196,17 @@ ValidatorSite::setTimer(
std::lock_guard<std::mutex> const& site_lock,
std::lock_guard<std::mutex> const& state_lock)
{
if (!sites_.empty() && //
std::ranges::all_of(sites_, [](auto const& site) {
return site.lastRefreshStatus.has_value();
}))
{
// If all of the sites have been handled at least once (including
// errors and timeouts), call missingSite, which will load the cache
// files for any lists that are still unavailable.
missingSite(site_lock);
}
auto next = std::min_element(
sites_.begin(), sites_.end(), [](Site const& a, Site const& b) {
return a.nextRefresh < b.nextRefresh;
@@ -303,13 +319,16 @@ ValidatorSite::onRequestTimeout(std::size_t siteIdx, error_code const& ec)
// processes a network error. Usually, this function runs first,
// but on extremely rare occasions, the response handler can run
// first, which will leave activeResource empty.
auto const& site = sites_[siteIdx];
auto& site = sites_[siteIdx];
if (site.activeResource)
JLOG(j_.warn()) << "Request for " << site.activeResource->uri
<< " took too long";
else
JLOG(j_.error()) << "Request took too long, but a response has "
"already been processed";
if (!site.lastRefreshStatus)
site.lastRefreshStatus.emplace(Site::Status{
clock_type::now(), ListDisposition::invalid, "timeout"});
}
std::lock_guard lock_state{state_mutex_};

View File

@@ -43,15 +43,6 @@ RippleCalc::rippleCalculate(
PaymentSandbox flowSB(&view);
auto j = l.journal("Flow");
if (!view.rules().enabled(featureFlow))
{
// The new payment engine was enabled several years ago. New transactions
// should never use the old rules. Assume this is a replay
j.fatal()
<< "Old payment rules are required for this transaction. Assuming "
"this is a replay and running with the new rules.";
}
{
bool const defaultPaths =
!pInputs ? true : pInputs->defaultPathsAllowed;

View File

@@ -433,7 +433,7 @@ public:
// add the strands in `next_` to `cur_`, sorted by theoretical quality.
// Best quality first.
cur_.clear();
if (v.rules().enabled(featureFlowSortStrands) && !next_.empty())
if (!next_.empty())
{
std::vector<std::pair<Quality, Strand const*>> strandQuals;
strandQuals.reserve(next_.size());
@@ -719,8 +719,6 @@ flow(
continue;
}
if (baseView.rules().enabled(featureFlowSortStrands))
{
XRPL_ASSERT(!best, "ripple::flow : best is unset");
if (!f.inactive)
activeStrands.push(strand);
@@ -729,36 +727,8 @@ flow(
break;
}
activeStrands.push(strand);
if (!best || best->quality < q ||
(best->quality == q && best->out < f.out))
{
// If this strand is inactive (because it consumed too many
// offers) and ends up having the best quality, remove it
// from the activeStrands. If it doesn't end up having the
// best quality, keep it active.
if (f.inactive)
{
// This should be `nextSize`, not `size`. This issue is
// fixed in featureFlowSortStrands.
markInactiveOnUse = activeStrands.size() - 1;
}
else
{
markInactiveOnUse.reset();
}
best.emplace(f.in, f.out, std::move(*f.sandbox), *strand, q);
}
}
bool const shouldBreak = [&] {
if (baseView.rules().enabled(featureFlowSortStrands))
return !best || offersConsidered >= maxOffersToConsider;
return !best;
}();
bool const shouldBreak =
!best || offersConsidered >= maxOffersToConsider;
if (best)
{
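
Finally, the flow.h hunk above removes the FlowSortStrands gate around the sorting step, so strands are now always ordered by theoretical quality, best first, before being tried. Below is a toy model of that ordering with stand-in types (double for Quality, std::string for Strand; higher quality treated as better, matching the removed `best->quality < q` comparison).

    #include <algorithm>
    #include <cstdio>
    #include <string>
    #include <utility>
    #include <vector>

    int main()
    {
        using Quality = double;       // stand-in for ripple::Quality
        using Strand = std::string;   // stand-in for a real payment strand

        std::vector<Strand> next{"strandA", "strandB", "strandC"};
        std::vector<Quality> theoreticalQuality{0.97, 1.02, 0.99};  // hypothetical values

        std::vector<std::pair<Quality, Strand const*>> strandQuals;
        strandQuals.reserve(next.size());
        for (std::size_t i = 0; i < next.size(); ++i)
            strandQuals.emplace_back(theoreticalQuality[i], &next[i]);

        // Best quality first, mirroring the "sorted by theoretical quality" comment.
        std::sort(
            strandQuals.begin(),
            strandQuals.end(),
            [](auto const& a, auto const& b) { return a.first > b.first; });

        for (auto const& [q, strand] : strandQuals)
            std::printf("%s (quality %.2f)\n", strand->c_str(), q);
        // Prints strandB, strandC, strandA.
    }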