Mirror of https://github.com/XRPLF/rippled.git, synced 2025-12-06 17:27:55 +00:00.

Compare commits: pratik/Cha...ximinez/te (14 commits)
| SHA1 |
|---|
| 14c2ceb1f9 |
| b986395ecc |
| 8941f7f3cf |
| eb8f2e6279 |
| a0f044f69d |
| d7f0653f77 |
| f2d4de23f1 |
| 1c52385243 |
| 603ec19038 |
| 0aab9ce44c |
| e4b18fd92b |
| 2bf218344b |
| 88e7ceef6a |
| 0e6b981a56 |
BUILD.md (33 changed lines)
@@ -141,26 +141,37 @@ Alternatively, you can pull the patched recipes into the repository and use them
locally:

```bash
# Extract the version number from the lockfile.
function extract_version {
  version=$(cat conan.lock | sed -nE "s@.+${1}/(.+)#.+@\1@p" | head -n1)
  echo ${version}
}

# Define which recipes to export.
recipes=(ed25519 grpc secp256k1 snappy soci)

# Selectively check out the recipes from our CCI fork.
cd external
mkdir -p conan-center-index
cd conan-center-index
git init
git remote add origin git@github.com:XRPLF/conan-center-index.git
git sparse-checkout init
git sparse-checkout set recipes/ed25519
git sparse-checkout add recipes/grpc
git sparse-checkout add recipes/secp256k1
git sparse-checkout add recipes/snappy
git sparse-checkout add recipes/soci
for recipe in ${recipes[@]}; do
  echo "Checking out ${recipe}..."
  git sparse-checkout add recipes/${recipe}/all
done
git fetch origin master
git checkout master
rm -rf .git
cd ../..
conan export --version 2015.03 external/conan-center-index/recipes/ed25519/all
conan export --version 1.72.0 external/conan-center-index/recipes/grpc/all
conan export --version 0.7.0 external/conan-center-index/recipes/secp256k1/all
conan export --version 1.1.10 external/conan-center-index/recipes/snappy/all
conan export --version 4.0.3 external/conan-center-index/recipes/soci/all

# Export the recipes into the local cache.
for recipe in ${recipes[@]}; do
  version=$(extract_version ${recipe})
  echo "Exporting ${recipe}/${version}..."
  conan export --version $(extract_version ${recipe}) \
    external/conan-center-index/recipes/${recipe}/all
done
```
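The `sed` expression in `extract_version` is terse. Purely as an illustration (not part of the repository), the same extraction can be written with `std::regex` against a hypothetical `conan.lock` reference line; the sample line and revision hash below are made up:

```cpp
#include <iostream>
#include <regex>
#include <string>

// Illustration of what `sed -nE "s@.+grpc/(.+)#.+@\1@p"` pulls out of a
// lockfile reference: everything between "<recipe>/" and the trailing
// "#<revision>". The sample line is hypothetical.
int main()
{
    std::string const line = R"("grpc/1.72.0#0123456789abcdef",)";
    std::regex const pattern(R"(.+grpc/(.+)#.+)");
    if (std::smatch m; std::regex_match(line, m, pattern))
        std::cout << m[1] << '\n';  // prints: 1.72.0
}
```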

In the case we switch to a newer version of a dependency that still requires a
@@ -67,7 +67,6 @@ XRPL_FEATURE(Clawback, Supported::yes, VoteBehavior::DefaultNo
XRPL_FIX (UniversalNumber, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FEATURE(XRPFees, Supported::yes, VoteBehavior::DefaultNo)
XRPL_FIX (RemoveNFTokenAutoTrustLine, Supported::yes, VoteBehavior::DefaultYes)
XRPL_FIX (TMGetObjectByHashLimit, Supported::yes, VoteBehavior::DefaultYes)

// The following amendments are obsolete, but must remain supported
// because they could potentially get enabled.
@@ -10,6 +10,7 @@
#include <doctest/doctest.h>

#include <atomic>
#include <iostream>
#include <map>
#include <thread>

@@ -17,6 +18,40 @@ using namespace ripple;

namespace {

struct logger
{
    std::string name;
    logger const* const parent = nullptr;
    static std::size_t depth;
    logger(std::string n) : name(n)
    {
        std::clog << indent() << name << " begin\n";
        ++depth;
    }

    logger(logger const& p, std::string n) : parent(&p), name(n)
    {
        std::clog << indent() << parent->name << " : " << name << " begin\n";
        ++depth;
    }

    ~logger()
    {
        --depth;
        if (parent)
            std::clog << indent() << parent->name << " : " << name << " end\n";
        else
            std::clog << indent() << name << " end\n";
    }

    std::string
    indent()
    {
        return std::string(depth, ' ');
    }
};
std::size_t logger::depth = 0;
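The `logger` above is a small RAII scope tracer: the constructor prints an indented `begin` line and bumps a shared depth counter, and the destructor prints the matching `end`. A minimal usage sketch, assuming the `logger` type above is in scope (the function and scope names here are made up):

```cpp
// Usage sketch for the RAII tracer above; the expected output, assuming
// logger::depth starts at 0, is shown in the trailing comments.
void traceExample()
{
    logger outer("handleRequest");     // "handleRequest begin"
    {
        logger inner(outer, "parse");  // " handleRequest : parse begin"
    }                                  // " handleRequest : parse end"
}                                      // "handleRequest end"
```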

// Simple HTTP server using Beast for testing
class TestHTTPServer
{
@@ -35,6 +70,7 @@ private:
public:
    TestHTTPServer() : acceptor_(ioc_), port_(0)
    {
        logger l("TestHTTPServer()");
        // Bind to any available port
        endpoint_ = {boost::asio::ip::tcp::v4(), 0};
        acceptor_.open(endpoint_.protocol());
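The constructor requests port 0 so the operating system picks a free port. As a rough standalone sketch of that idiom only (not the TestHTTPServer code itself; the bind/listen/local_endpoint steps are not shown in the hunk and are assumed here):

```cpp
#include <boost/asio.hpp>
#include <iostream>

// Sketch of the "bind to any available port" idiom: open an acceptor on
// port 0, then ask the OS which port it actually assigned.
int main()
{
    namespace asio = boost::asio;
    asio::io_context ioc;
    asio::ip::tcp::endpoint endpoint{asio::ip::tcp::v4(), 0};
    asio::ip::tcp::acceptor acceptor{ioc};
    acceptor.open(endpoint.protocol());
    acceptor.bind(endpoint);
    acceptor.listen();
    std::cout << "listening on port "
              << acceptor.local_endpoint().port() << '\n';
}
```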
@@ -50,6 +86,7 @@

    ~TestHTTPServer()
    {
        logger l("~TestHTTPServer()");
        stop();
    }

@@ -87,6 +124,7 @@ private:
    void
    stop()
    {
        logger l("TestHTTPServer::stop");
        running_ = false;
        acceptor_.close();
    }
@@ -94,6 +132,7 @@ private:
    void
    accept()
    {
        logger l("TestHTTPServer::accept");
        if (!running_)
            return;

@@ -115,31 +154,37 @@ private:
    void
    handleConnection(boost::asio::ip::tcp::socket socket)
    {
        logger l("TestHTTPServer::handleConnection");
        try
        {
            std::optional<logger> r(std::in_place, l, "read the http request");
            // Read the HTTP request
            boost::beast::flat_buffer buffer;
            boost::beast::http::request<boost::beast::http::string_body> req;
            boost::beast::http::read(socket, buffer, req);

            // Create response
            r.emplace(l, "create response");
            boost::beast::http::response<boost::beast::http::string_body> res;
            res.version(req.version());
            res.result(status_code_);
            res.set(boost::beast::http::field::server, "TestServer");

            // Add custom headers
            r.emplace(l, "add custom headers");
            for (auto const& [name, value] : custom_headers_)
            {
                res.set(name, value);
            }

            // Set body and prepare payload first
            r.emplace(l, "set body and prepare payload");
            res.body() = response_body_;
            res.prepare_payload();

            // Override Content-Length with custom headers after prepare_payload
            // This allows us to test case-insensitive header parsing
            r.emplace(l, "override content-length");
            for (auto const& [name, value] : custom_headers_)
            {
                if (boost::iequals(name, "Content-Length"))
@@ -150,19 +195,25 @@ private:
            }

            // Send response
            r.emplace(l, "send response");
            boost::beast::http::write(socket, res);

            // Shutdown socket gracefully
            r.emplace(l, "shutdown socket");
            boost::system::error_code ec;
            socket.shutdown(boost::asio::ip::tcp::socket::shutdown_send, ec);
        }
        catch (std::exception const&)
        {
            // Connection handling errors are expected
            logger c(l, "catch");
        }

        if (running_)
        {
            logger r(l, "accept");
            accept();
        }
    }
};

@@ -176,12 +227,16 @@ runHTTPTest(
    std::string& result_data,
    boost::system::error_code& result_error)
{
    logger l("runHTTPTest");
    // Create a null journal for testing
    beast::Journal j{beast::Journal::getNullSink()};

    std::optional<logger> r(std::in_place, l, "initializeSSLContext");

    // Initialize HTTPClient SSL context
    HTTPClient::initializeSSLContext("", "", false, j);

    r.emplace(l, "HTTPClient::get");
    HTTPClient::get(
        false,  // no SSL
        server.ioc(),
@@ -206,6 +261,7 @@ runHTTPTest(
    while (!completed &&
           std::chrono::steady_clock::now() - start < std::chrono::seconds(10))
    {
        r.emplace(l, "ioc.run_one");
        if (server.ioc().run_one() == 0)
        {
            break;
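The loop above drives both the client and the test server on one `io_context`, pumping handlers one at a time until the completion flag flips or ten seconds pass. A self-contained sketch of that bounded `run_one()` pattern, with the posted work and names purely illustrative:

```cpp
#include <boost/asio.hpp>
#include <chrono>
#include <iostream>

// Bounded run_one() polling: drain handlers until a completion flag flips,
// the io_context runs out of work, or a wall-clock deadline passes.
int main()
{
    boost::asio::io_context ioc;
    bool completed = false;

    // Stand-in for the asynchronous work the real test starts.
    boost::asio::post(ioc, [&] { completed = true; });

    auto const start = std::chrono::steady_clock::now();
    while (!completed &&
           std::chrono::steady_clock::now() - start < std::chrono::seconds(10))
    {
        // run_one() returns the number of handlers executed; 0 means the
        // io_context has no more work.
        if (ioc.run_one() == 0)
            break;
    }
    std::cout << (completed ? "completed\n" : "timed out\n");
}
```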
@@ -219,6 +275,8 @@

TEST_CASE("HTTPClient case insensitive Content-Length")
{
    logger l("HTTPClient case insensitive Content-Length");

    // Test different cases of Content-Length header
    std::vector<std::string> header_cases = {
        "Content-Length",  // Standard case
@@ -230,6 +288,7 @@ TEST_CASE("HTTPClient case insensitive Content-Length")

    for (auto const& header_name : header_cases)
    {
        logger h(l, header_name);
        TestHTTPServer server;
        std::string test_body = "Hello World!";
        server.setResponseBody(test_body);
@@ -258,6 +317,7 @@ TEST_CASE("HTTPClient case insensitive Content-Length")

TEST_CASE("HTTPClient basic HTTP request")
{
    logger l("HTTPClient basic HTTP request");
    TestHTTPServer server;
    std::string test_body = "Test response body";
    server.setResponseBody(test_body);
@@ -279,6 +339,7 @@ TEST_CASE("HTTPClient basic HTTP request")

TEST_CASE("HTTPClient empty response")
{
    logger l("HTTPClient empty response");
    TestHTTPServer server;
    server.setResponseBody("");  // Empty body
    server.setHeader("Content-Length", "0");
@@ -299,6 +360,7 @@ TEST_CASE("HTTPClient empty response")

TEST_CASE("HTTPClient different status codes")
{
    logger l("HTTPClient different status codes");
    std::vector<unsigned int> status_codes = {200, 404, 500};

    for (auto status : status_codes)
@@ -18,7 +18,6 @@
#include <xrpl/basics/base64.h>
#include <xrpl/basics/random.h>
#include <xrpl/basics/safe_cast.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/TxFlags.h>
#include <xrpl/protocol/digest.h>

@@ -2590,51 +2589,9 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMGetObjectByHash> const& m)
        reply.set_ledgerhash(packet.ledgerhash());
    }

    // Get validated rules to check if the fix is enabled
    auto const rules = app_.getLedgerMaster().getValidatedRules();

    // Charge resource fee based on request size when fix is enabled
    if (rules.enabled(fixTMGetObjectByHashLimit))
    {
        // Enforce per-request object cap
        if (packet.objects_size() > Tuning::maxGetObjectByHash)
        {
            fee_.update(Resource::feeMalformedRequest, "too many objects");
            return;
        }

        // Charge heavier fee for large requests (>256 objects)
        if (packet.objects_size() > 256)
        {
            fee_.update(
                Resource::feeHeavyBurdenPeer,
                "large get object by hash request");
        }
        else if (packet.objects_size() > 64)
        {
            fee_.update(
                Resource::feeModerateBurdenPeer,
                "moderate get object by hash request");
        }
        else
        {
            fee_.update(
                Resource::feeTrivialPeer,
                "small get object by hash request");
        }
    }
    else
    {
        // Legacy behavior: charge moderate fee for all requests
        fee_.update(
            Resource::feeModerateBurdenPeer,
            "received a get object by hash request");
    }

    // Track reply bytes and stop when over budget (16 MiB) when fix is enabled
    std::size_t replyBudgetBytes =
        rules.enabled(fixTMGetObjectByHashLimit) ? megabytes(16) : 0;
    std::size_t replyBytes = 0;
    fee_.update(
        Resource::feeModerateBurdenPeer,
        " received a get object by hash request");

    // This is a very minimal implementation
    for (int i = 0; i < packet.objects_size(); ++i)
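As an illustration only (not the rippled API), the request-size tiers in the hunk above can be summarized as a standalone function; the enum values are placeholders for the `Resource::fee*` charges, and 1024 mirrors `Tuning::maxGetObjectByHash`:

```cpp
#include <cstddef>

// Illustrative summary of the fee tiers applied when
// fixTMGetObjectByHashLimit is enabled; names here are placeholders.
enum class FeeTier { Malformed, Heavy, Moderate, Trivial };

constexpr std::size_t maxGetObjectByHash = 1024;  // per-request object cap

FeeTier
classifyGetObjectByHash(std::size_t objects)
{
    if (objects > maxGetObjectByHash)
        return FeeTier::Malformed;  // request is rejected ("too many objects")
    if (objects > 256)
        return FeeTier::Heavy;      // feeHeavyBurdenPeer
    if (objects > 64)
        return FeeTier::Moderate;   // feeModerateBurdenPeer
    return FeeTier::Trivial;        // feeTrivialPeer
}
```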
@@ -2649,28 +2606,17 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMGetObjectByHash> const& m)
            auto nodeObject{app_.getNodeStore().fetchNodeObject(hash, seq)};
            if (nodeObject)
            {
                auto const dataSz = nodeObject->getData().size();
                // Check if adding this object would exceed the reply budget
                // (only when fix is enabled)
                if (replyBudgetBytes > 0 &&
                    replyBytes + dataSz + 64 > replyBudgetBytes)
                    break;

                protocol::TMIndexedObject& newObj = *reply.add_objects();
                newObj.set_hash(hash.begin(), hash.size());
                newObj.set_data(
                    &nodeObject->getData().front(),
                    dataSz);
                    nodeObject->getData().size());

                if (obj.has_nodeid())
                    newObj.set_index(obj.nodeid());
                if (obj.has_ledgerseq())
                    newObj.set_ledgerseq(obj.ledgerseq());

                // Track reply bytes when fix is enabled
                if (replyBudgetBytes > 0)
                    replyBytes += dataSz + 64;  // include modest overhead estimate

                // VFALCO NOTE "seq" in the message is obsolete
            }
        }

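The loop above also enforces a 16 MiB reply budget, counting each object's data size plus a fixed 64-byte overhead estimate. A small self-contained sketch of that accounting follows; the function itself is hypothetical, while the numbers mirror the hunk:

```cpp
#include <cstddef>
#include <vector>

// Sketch of the reply-budget accounting: stop adding objects once the
// running total (data size + 64 bytes of estimated overhead per object)
// would exceed the budget. 16 MiB matches megabytes(16) in the hunk above.
std::size_t
countObjectsWithinBudget(
    std::vector<std::size_t> const& objectSizes,
    std::size_t budgetBytes = 16 * 1024 * 1024)
{
    std::size_t usedBytes = 0;
    std::size_t count = 0;
    for (auto const size : objectSizes)
    {
        if (usedBytes + size + 64 > budgetBytes)
            break;
        usedBytes += size + 64;
        ++count;
    }
    return count;
}
```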
@@ -22,9 +22,6 @@ enum {
    /** The hard cap on the number of ledger entries in a single reply. */
    hardMaxReplyNodes = 12288,

    /** Hard cap on TMGetObjectByHash objects per request (non-TRANSACTIONS). */
    maxGetObjectByHash = 1024,

    /** How many timer intervals a sendq has to stay large before we disconnect
     */
    sendqIntervals = 4,