Mirror of https://github.com/XRPLF/rippled.git, synced 2025-12-06 17:27:55 +00:00.

Compare commits: `pratik/Cha…` → `ximinez/em…` (16 commits)

- 8f267c3bc9
- b986395ecc
- bface8d5d6
- 24174f6ea7
- cf3ad16bdf
- a4046aa135
- 46f6332e60
- ff3c2bf2f9
- 379e1ed555
- e9fb99056b
- e9fa9d7aa6
- 2c3f169dec
- 23565405ee
- a5d08b0cd5
- 7bf3f543b3
- c773288df5
BUILD.md (33 changed lines):

````diff
@@ -141,26 +141,37 @@ Alternatively, you can pull the patched recipes into the repository and use them
 locally:
 
 ```bash
+# Extract the version number from the lockfile.
+function extract_version {
+    version=$(cat conan.lock | sed -nE "s@.+${1}/(.+)#.+@\1@p" | head -n1)
+    echo ${version}
+}
+
+# Define which recipes to export.
+recipes=(ed25519 grpc secp256k1 snappy soci)
+
+# Selectively check out the recipes from our CCI fork.
 cd external
 mkdir -p conan-center-index
 cd conan-center-index
 git init
 git remote add origin git@github.com:XRPLF/conan-center-index.git
 git sparse-checkout init
-git sparse-checkout set recipes/ed25519
-git sparse-checkout add recipes/grpc
-git sparse-checkout add recipes/secp256k1
-git sparse-checkout add recipes/snappy
-git sparse-checkout add recipes/soci
+for recipe in ${recipes[@]}; do
+    echo "Checking out ${recipe}..."
+    git sparse-checkout add recipes/${recipe}/all
+done
 git fetch origin master
 git checkout master
-rm -rf .git
 cd ../..
-conan export --version 2015.03 external/conan-center-index/recipes/ed25519/all
-conan export --version 1.72.0 external/conan-center-index/recipes/grpc/all
-conan export --version 0.7.0 external/conan-center-index/recipes/secp256k1/all
-conan export --version 1.1.10 external/conan-center-index/recipes/snappy/all
-conan export --version 4.0.3 external/conan-center-index/recipes/soci/all
+
+# Export the recipes into the local cache.
+for recipe in ${recipes[@]}; do
+    version=$(extract_version ${recipe})
+    echo "Exporting ${recipe}/${version}..."
+    conan export --version ${version} \
+        external/conan-center-index/recipes/${recipe}/all
+done
 ```
 
 In the case we switch to a newer version of a dependency that still requires a
````
```diff
@@ -67,7 +67,6 @@ XRPL_FEATURE(Clawback, Supported::yes, VoteBehavior::DefaultNo
 XRPL_FIX (UniversalNumber, Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FEATURE(XRPFees, Supported::yes, VoteBehavior::DefaultNo)
 XRPL_FIX (RemoveNFTokenAutoTrustLine, Supported::yes, VoteBehavior::DefaultYes)
-XRPL_FIX (TMGetObjectByHashLimit, Supported::yes, VoteBehavior::DefaultYes)
 
 // The following amendments are obsolete, but must remain supported
 // because they could potentially get enabled.
```
```diff
@@ -256,6 +256,7 @@ ApplyView::dirRemove(
     uint256 const& key,
     bool keepRoot)
 {
+    keepRoot = false;
     auto node = peek(keylet::page(directory, page));
 
     if (!node)
```
```diff
@@ -3553,4 +3553,42 @@ ValidVault::finalize(
     return true;
 }
 
+//------------------------------------------------------------------------------
+
+void
+NoEmptyDirectory::visitEntry(
+    bool isDelete,
+    std::shared_ptr<SLE const> const& before,
+    std::shared_ptr<SLE const> const& after)
+{
+    if (isDelete)
+        return;
+    if (before && before->getType() != ltDIR_NODE)
+        return;
+    if (!after || after->getType() != ltDIR_NODE)
+        return;
+    if (!after->isFieldPresent(sfOwner))
+        // Not an account dir
+        return;
+
+    bad_ = bad_ || after->at(sfIndexes).empty();
+}
+
+bool
+NoEmptyDirectory::finalize(
+    STTx const& tx,
+    TER const result,
+    XRPAmount const,
+    ReadView const& view,
+    beast::Journal const& j)
+{
+    if (bad_)
+    {
+        JLOG(j.fatal()) << "Invariant failed: empty owner directory.";
+        return false;
+    }
+
+    return true;
+}
+
 } // namespace ripple
```
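For context, every invariant check follows the same two-phase protocol: `visitEntry` is invoked for each ledger entry the transaction touched, and `finalize` delivers the verdict once the transaction has been applied. A minimal, self-contained sketch of that protocol, using a hypothetical `Entry` stand-in and an `EmptyDirCheck` class instead of the real `SLE` and `ReadView` types:

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Hypothetical stand-in for a ledger entry; the real code uses SLE.
struct Entry
{
    bool isDirectory = false;
    bool hasOwner = false;
    std::size_t indexCount = 0;
};

// Mirrors the two-phase shape of NoEmptyDirectory, with simplified types.
class EmptyDirCheck
{
    bool bad_ = false;

public:
    void
    visitEntry(bool isDelete, Entry const* after)
    {
        // Only owner directories that still exist after the transaction
        // are interesting, the same filtering as the real visitEntry above.
        if (isDelete || !after || !after->isDirectory || !after->hasOwner)
            return;
        if (after->indexCount == 0)
            bad_ = true;  // sticky: one bad entry fails the transaction
    }

    bool
    finalize(std::ostream& log) const
    {
        if (bad_)
            log << "Invariant failed: empty owner directory.\n";
        return !bad_;
    }
};

int
main()
{
    std::vector<Entry> touched = {
        {true, true, 3},  // healthy owner directory
        {true, true, 0},  // emptied owner directory -> invariant fires
    };

    EmptyDirCheck check;
    for (auto const& e : touched)
        check.visitEntry(/*isDelete=*/false, &e);
    return check.finalize(std::cerr) ? 0 : 1;
}
```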
```diff
@@ -901,6 +901,30 @@ public:
         beast::Journal const&);
 };
 
+/**
+ * @brief Invariants: An account's directory should never be empty
+ *
+ */
+class NoEmptyDirectory
+{
+    bool bad_ = false;
+
+public:
+    void
+    visitEntry(
+        bool,
+        std::shared_ptr<SLE const> const&,
+        std::shared_ptr<SLE const> const&);
+
+    bool
+    finalize(
+        STTx const&,
+        TER const,
+        XRPAmount const,
+        ReadView const&,
+        beast::Journal const&);
+};
+
 // additional invariant checks can be declared above and then added to this
 // tuple
 using InvariantChecks = std::tuple<
@@ -927,7 +951,8 @@ using InvariantChecks = std::tuple<
     ValidPseudoAccounts,
     ValidLoanBroker,
     ValidLoan,
-    ValidVault>;
+    ValidVault,
+    NoEmptyDirectory>;
 
 /**
  * @brief get a tuple of all invariant checks
```
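Registration amounts to appending the new type to the `InvariantChecks` tuple; the framework then fans each visited entry out to every element and requires every `finalize` to pass. A sketch of that tuple-driven dispatch, assuming C++17 fold expressions and simplified signatures (hypothetical `CheckA`/`CheckB` types, not the exact rippled driver):

```cpp
#include <tuple>

struct CheckA
{
    void visitEntry(int /*entry*/) {}
    bool finalize() const { return true; }
};

struct CheckB
{
    void visitEntry(int /*entry*/) {}
    bool finalize() const { return true; }
};

// Appending a new check type here is the whole registration step.
using Checks = std::tuple<CheckA, CheckB>;

// Fan one visited entry out to every registered check.
void
visitAll(Checks& checks, int entry)
{
    std::apply([&](auto&... c) { (c.visitEntry(entry), ...); }, checks);
}

// The transaction passes only if every check's finalize() agrees.
bool
finalizeAll(Checks const& checks)
{
    return std::apply(
        [](auto const&... c) { return (c.finalize() && ...); }, checks);
}

int
main()
{
    Checks checks;
    visitAll(checks, 42);
    return finalizeAll(checks) ? 0 : 1;
}
```

A tuple keeps the set of checks fixed at compile time, so adding one is a type-list edit with no virtual dispatch or runtime registry.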
```diff
@@ -18,7 +18,6 @@
 #include <xrpl/basics/base64.h>
 #include <xrpl/basics/random.h>
 #include <xrpl/basics/safe_cast.h>
-#include <xrpl/protocol/Feature.h>
 #include <xrpl/protocol/TxFlags.h>
 #include <xrpl/protocol/digest.h>
 
```
```diff
@@ -2590,51 +2589,9 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMGetObjectByHash> const& m)
         reply.set_ledgerhash(packet.ledgerhash());
     }
 
-    // Get validated rules to check if the fix is enabled
-    auto const rules = app_.getLedgerMaster().getValidatedRules();
-
-    // Charge resource fee based on request size when fix is enabled
-    if (rules.enabled(fixTMGetObjectByHashLimit))
-    {
-        // Enforce per-request object cap
-        if (packet.objects_size() > Tuning::maxGetObjectByHash)
-        {
-            fee_.update(Resource::feeMalformedRequest, "too many objects");
-            return;
-        }
-
-        // Charge heavier fee for large requests (>256 objects)
-        if (packet.objects_size() > 256)
-        {
-            fee_.update(
-                Resource::feeHeavyBurdenPeer,
-                "large get object by hash request");
-        }
-        else if (packet.objects_size() > 64)
-        {
-            fee_.update(
-                Resource::feeModerateBurdenPeer,
-                "moderate get object by hash request");
-        }
-        else
-        {
-            fee_.update(
-                Resource::feeTrivialPeer,
-                "small get object by hash request");
-        }
-    }
-    else
-    {
-        // Legacy behavior: charge moderate fee for all requests
-        fee_.update(
-            Resource::feeModerateBurdenPeer,
-            "received a get object by hash request");
-    }
-
-    // Track reply bytes and stop when over budget (16 MiB) when fix is enabled
-    std::size_t replyBudgetBytes =
-        rules.enabled(fixTMGetObjectByHashLimit) ? megabytes(16) : 0;
-    std::size_t replyBytes = 0;
+    fee_.update(
+        Resource::feeModerateBurdenPeer,
+        " received a get object by hash request");
 
     // This is a very minimal implementation
     for (int i = 0; i < packet.objects_size(); ++i)
```
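The removed branch graded the fee by request size: anything above the `Tuning::maxGetObjectByHash` cap was rejected as malformed, and the rest paid trivial, moderate, or heavy fees around the 64- and 256-object thresholds. A standalone sketch of that tiering, with a hypothetical `FeeTier` enum and `classifyRequest` function standing in for the real `Resource` charges:

```cpp
#include <iostream>
#include <optional>

enum class FeeTier { Trivial, Moderate, Heavy };

// Cap taken from the Tuning.h hunk further down.
constexpr int maxGetObjectByHash = 1024;

// Map a request's object count to a fee tier;
// nullopt means reject the request as malformed.
std::optional<FeeTier>
classifyRequest(int objectCount)
{
    if (objectCount > maxGetObjectByHash)
        return std::nullopt;
    if (objectCount > 256)
        return FeeTier::Heavy;
    if (objectCount > 64)
        return FeeTier::Moderate;
    return FeeTier::Trivial;
}

int
main()
{
    for (int n : {10, 100, 1000, 2000})
        std::cout << n << " objects -> "
                  << (classifyRequest(n) ? "charged" : "rejected") << '\n';
}
```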
```diff
@@ -2649,28 +2606,17 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMGetObjectByHash> const& m)
             auto nodeObject{app_.getNodeStore().fetchNodeObject(hash, seq)};
             if (nodeObject)
             {
-                auto const dataSz = nodeObject->getData().size();
-                // Check if adding this object would exceed the reply budget
-                // (only when fix is enabled)
-                if (replyBudgetBytes > 0 &&
-                    replyBytes + dataSz + 64 > replyBudgetBytes)
-                    break;
-
                 protocol::TMIndexedObject& newObj = *reply.add_objects();
                 newObj.set_hash(hash.begin(), hash.size());
                 newObj.set_data(
                     &nodeObject->getData().front(),
-                    dataSz);
+                    nodeObject->getData().size());
 
                 if (obj.has_nodeid())
                     newObj.set_index(obj.nodeid());
                 if (obj.has_ledgerseq())
                     newObj.set_ledgerseq(obj.ledgerseq());
 
-                // Track reply bytes when fix is enabled
-                if (replyBudgetBytes > 0)
-                    replyBytes += dataSz + 64; // include modest overhead estimate
-
                 // VFALCO NOTE "seq" in the message is obsolete
             }
         }
```
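The second removed piece capped reply size rather than object count: each packed object contributed its data size plus a flat 64-byte overhead estimate to a running total, and packing stopped once the 16 MiB budget would be exceeded (a budget of zero meant the legacy, unlimited behavior). A simplified sketch of that accumulate-and-break pattern, with a hypothetical `packWithinBudget` helper over plain object sizes:

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Pack objects until the next one would blow the byte budget.
// Returns how many objects fit; budgetBytes == 0 disables the cap.
std::size_t
packWithinBudget(
    std::vector<std::size_t> const& objectSizes,
    std::size_t budgetBytes)
{
    std::size_t replyBytes = 0;
    std::size_t packed = 0;
    for (std::size_t const sz : objectSizes)
    {
        if (budgetBytes > 0 && replyBytes + sz + 64 > budgetBytes)
            break;
        replyBytes += sz + 64;  // modest per-object overhead estimate
        ++packed;
    }
    return packed;
}

int
main()
{
    std::vector<std::size_t> const sizes(1000, 32 * 1024);  // 1000 x 32 KiB
    std::size_t const budget = 16 * 1024 * 1024;            // 16 MiB

    std::cout << packWithinBudget(sizes, budget) << " of " << sizes.size()
              << " objects fit in the budget\n";
}
```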
```diff
@@ -22,9 +22,6 @@ enum {
     /** The hard cap on the number of ledger entries in a single reply. */
     hardMaxReplyNodes = 12288,
 
-    /** Hard cap on TMGetObjectByHash objects per request (non-TRANSACTIONS). */
-    maxGetObjectByHash = 1024,
-
     /** How many timer intervals a sendq has to stay large before we disconnect
     */
     sendqIntervals = 4,
```