Mirror of https://github.com/XRPLF/rippled.git (synced 2025-12-06 17:27:55 +00:00)
Improve transaction relaying logic:
The existing logic has every server send every transaction it receives to all of its peers (except the one it received the transaction from). This commit instead uses a randomized algorithm: a node randomly selects the peers to relay a given transaction to, caches the hashes of the transactions it did not relay, and forwards those hashes to the remaining peers once every second. Peers can then determine whether there are transactions they have not seen and request them from a node that has them. This feature is expected to further reduce the bandwidth needed to operate a server.
Committed by: Nik Bougalis
Parent: 0d17dd8228
Commit: ea145d12c7
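
The gist of the randomized selection, as a minimal standalone sketch; the type and function names below (RelayPlan, pickPeersToRelay) are illustrative only, not rippled's actual API, and the real logic lives in the overlay code changed by this commit:

#include <algorithm>
#include <cstddef>
#include <random>
#include <vector>

struct RelayPlan
{
    std::vector<int> relayNow;  // peers that receive the full transaction
    std::vector<int> hashOnly;  // peers queued to receive only the tx hash
};

// Randomly pick `relayCount` peers to receive the transaction itself. The
// remaining peers are queued to receive the transaction's hash in the next
// once-per-second flush; any of them that have not seen the transaction
// can then request it in full.
RelayPlan
pickPeersToRelay(std::vector<int> peers, std::size_t relayCount)
{
    static std::mt19937 gen{std::random_device{}()};
    std::shuffle(peers.begin(), peers.end(), gen);

    relayCount = std::min(relayCount, peers.size());
    RelayPlan plan;
    plan.relayNow.assign(peers.begin(), peers.begin() + relayCount);
    plan.hashOnly.assign(peers.begin() + relayCount, peers.end());
    return plan;
}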
@@ -21,6 +21,7 @@
#include <ripple/app/ledger/InboundLedgers.h>
#include <ripple/app/ledger/InboundTransactions.h>
#include <ripple/app/ledger/LedgerMaster.h>
#include <ripple/app/ledger/TransactionMaster.h>
#include <ripple/app/misc/HashRouter.h>
#include <ripple/app/misc/LoadFeeTrack.h>
#include <ripple/app/misc/NetworkOPs.h>
@@ -108,6 +109,10 @@ PeerImp::PeerImp(
              app_.config().COMPRESSION)
              ? Compressed::On
              : Compressed::Off)
    , txReduceRelayEnabled_(peerFeatureEnabled(
          headers_,
          FEATURE_TXRR,
          app_.config().TX_REDUCE_RELAY_ENABLE))
    , vpReduceRelayEnabled_(peerFeatureEnabled(
          headers_,
          FEATURE_VPRR,
@@ -118,11 +123,13 @@ PeerImp::PeerImp(
          app_.config().LEDGER_REPLAY))
    , ledgerReplayMsgHandler_(app, app.getLedgerReplayer())
{
    JLOG(journal_.debug()) << " compression enabled "
                           << (compressionEnabled_ == Compressed::On)
                           << " vp reduce-relay enabled "
                           << vpReduceRelayEnabled_ << " on " << remote_address_
                           << " " << id_;
    JLOG(journal_.info()) << "compression enabled "
                          << (compressionEnabled_ == Compressed::On)
                          << " vp reduce-relay enabled "
                          << vpReduceRelayEnabled_
                          << " tx reduce-relay enabled "
                          << txReduceRelayEnabled_ << " on " << remote_address_
                          << " " << id_;
}

PeerImp::~PeerImp()
@@ -285,6 +292,54 @@ PeerImp::send(std::shared_ptr<Message> const& m)
            std::placeholders::_2)));
}

void
PeerImp::sendTxQueue()
{
    // Hop onto the peer's strand if we are not already on it, so that
    // txQueue_ is only ever accessed serially.
    if (!strand_.running_in_this_thread())
        return post(
            strand_, std::bind(&PeerImp::sendTxQueue, shared_from_this()));

    if (!txQueue_.empty())
    {
        // Batch every queued hash into a single TMHaveTransactions message.
        protocol::TMHaveTransactions ht;
        std::for_each(txQueue_.begin(), txQueue_.end(), [&](auto const& hash) {
            ht.add_hashes(hash.data(), hash.size());
        });
        JLOG(p_journal_.trace()) << "sendTxQueue " << txQueue_.size();
        txQueue_.clear();
        send(std::make_shared<Message>(ht, protocol::mtHAVE_TRANSACTIONS));
    }
}

void
PeerImp::addTxQueue(uint256 const& hash)
{
    if (!strand_.running_in_this_thread())
        return post(
            strand_, std::bind(&PeerImp::addTxQueue, shared_from_this(), hash));

    // The queue is at capacity: flush it before inserting the new hash.
    if (txQueue_.size() == reduce_relay::MAX_TX_QUEUE_SIZE)
    {
        JLOG(p_journal_.warn()) << "addTxQueue exceeds the cap";
        sendTxQueue();
    }

    txQueue_.insert(hash);
    JLOG(p_journal_.trace()) << "addTxQueue " << txQueue_.size();
}

void
PeerImp::removeTxQueue(uint256 const& hash)
{
    if (!strand_.running_in_this_thread())
        return post(
            strand_,
            std::bind(&PeerImp::removeTxQueue, shared_from_this(), hash));

    auto removed = txQueue_.erase(hash);
    JLOG(p_journal_.trace()) << "removeTxQueue " << removed;
}
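
All three helpers above use the same Boost.Asio idiom: a call made off the strand re-posts itself onto the strand and returns, so the queue is only ever mutated from one logical thread of execution. A minimal self-contained sketch of that pattern, assuming a plain io_context; the class and member names (Worker, queue_) are illustrative, not rippled's:

#include <boost/asio.hpp>
#include <functional>
#include <memory>
#include <vector>

class Worker : public std::enable_shared_from_this<Worker>
{
    boost::asio::io_context::strand strand_;
    std::vector<int> queue_;  // only ever touched on strand_

public:
    explicit Worker(boost::asio::io_context& ioc) : strand_(ioc)
    {
    }

    void
    add(int v)
    {
        // Off-strand callers bounce the call onto the strand and return;
        // the body below then runs serialized with all other queue access.
        if (!strand_.running_in_this_thread())
            return boost::asio::post(
                strand_, std::bind(&Worker::add, shared_from_this(), v));

        queue_.push_back(v);
    }
};

int
main()
{
    boost::asio::io_context ioc;
    auto w = std::make_shared<Worker>(ioc);
    w->add(42);  // safe to call from any thread
    ioc.run();
}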

void
PeerImp::charge(Resource::Charge const& fee)
{
@@ -967,8 +1022,25 @@ PeerImp::onMessageBegin(
    load_event_ =
        app_.getJobQueue().makeLoadEvent(jtPEER, protocolMessageName(type));
    fee_ = Resource::feeLightPeer;
    overlay_.reportTraffic(
        TrafficCount::categorize(*m, type, true), true, static_cast<int>(size));
    auto const category = TrafficCount::categorize(*m, type, true);
    overlay_.reportTraffic(category, true, static_cast<int>(size));

    using namespace protocol;
    if ((type == MessageType::mtTRANSACTION ||
         type == MessageType::mtHAVE_TRANSACTIONS ||
         type == MessageType::mtTRANSACTIONS ||
         // GET_OBJECTS
         category == TrafficCount::category::get_transactions ||
         // GET_LEDGER
         category == TrafficCount::category::ld_tsc_get ||
         category == TrafficCount::category::ld_tsc_share ||
         // LEDGER_DATA
         category == TrafficCount::category::gl_tsc_share ||
         category == TrafficCount::category::gl_tsc_get) &&
        (txReduceRelayEnabled() || app_.config().TX_REDUCE_RELAY_METRICS))
    {
        overlay_.addTxMetrics(
            static_cast<MessageType>(type), static_cast<std::uint64_t>(size));
    }
    JLOG(journal_.trace()) << "onMessageBegin: " << type << " " << size << " "
                           << uncompressed_size << " " << isCompressed;
}
@@ -1440,6 +1512,14 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMEndpoints> const& m)

void
PeerImp::onMessage(std::shared_ptr<protocol::TMTransaction> const& m)
{
    handleTransaction(m, true);
}

void
PeerImp::handleTransaction(
    std::shared_ptr<protocol::TMTransaction> const& m,
    bool eraseTxQueue)
{
    if (tracking_.load() == Tracking::diverged)
        return;
@@ -1472,6 +1552,11 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMTransaction> const& m)
        JLOG(p_journal_.debug()) << "Ignoring known bad tx " << txID;
    }

    // Erase only if the server has seen this tx. If the server has not
    // seen this tx, then it could not have been queued for this peer.
    else if (eraseTxQueue && txReduceRelayEnabled())
        removeTxQueue(txID);

    return;
}
@@ -2509,6 +2594,9 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMGetObjectByHash> const& m)
{
    protocol::TMGetObjectByHash& packet = *m;

    JLOG(p_journal_.trace()) << "received TMGetObjectByHash " << packet.type()
                             << " " << packet.objects_size();

    if (packet.query())
    {
        // this is a query
@@ -2524,6 +2612,25 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMGetObjectByHash> const& m)
            return;
        }

        if (packet.type() == protocol::TMGetObjectByHash::otTRANSACTIONS)
        {
            if (!txReduceRelayEnabled())
            {
                JLOG(p_journal_.error())
                    << "TMGetObjectByHash: tx reduce-relay is disabled";
                fee_ = Resource::feeInvalidRequest;
                return;
            }

            std::weak_ptr<PeerImp> weak = shared_from_this();
            app_.getJobQueue().addJob(
                jtREQUESTED_TXN, "doTransactions", [weak, m](Job&) {
                    if (auto peer = weak.lock())
                        peer->doTransactions(m);
                });
            return;
        }

        fee_ = Resource::feeMediumBurdenPeer;

        protocol::TMGetObjectByHash reply;
@@ -2644,6 +2751,98 @@ PeerImp::onMessage(std::shared_ptr<protocol::TMGetObjectByHash> const& m)
    }
}

void
PeerImp::onMessage(std::shared_ptr<protocol::TMHaveTransactions> const& m)
{
    if (!txReduceRelayEnabled())
    {
        JLOG(p_journal_.error())
            << "TMHaveTransactions: tx reduce-relay is disabled";
        fee_ = Resource::feeInvalidRequest;
        return;
    }

    std::weak_ptr<PeerImp> weak = shared_from_this();
    app_.getJobQueue().addJob(
        jtMISSING_TXN, "handleHaveTransactions", [weak, m](Job&) {
            if (auto peer = weak.lock())
                peer->handleHaveTransactions(m);
        });
}

void
PeerImp::handleHaveTransactions(
    std::shared_ptr<protocol::TMHaveTransactions> const& m)
{
    protocol::TMGetObjectByHash tmBH;
    tmBH.set_type(protocol::TMGetObjectByHash_ObjectType_otTRANSACTIONS);
    tmBH.set_query(true);

    JLOG(p_journal_.trace())
        << "received TMHaveTransactions " << m->hashes_size();

    for (std::uint32_t i = 0; i < m->hashes_size(); i++)
    {
        if (!stringIsUint256Sized(m->hashes(i)))
        {
            JLOG(p_journal_.error())
                << "TMHaveTransactions with invalid hash size";
            fee_ = Resource::feeInvalidRequest;
            return;
        }

        uint256 hash(m->hashes(i));

        auto txn = app_.getMasterTransaction().fetch_from_cache(hash);

        JLOG(p_journal_.trace()) << "checking transaction " << (bool)txn;

        if (!txn)
        {
            JLOG(p_journal_.debug()) << "adding transaction to request";

            auto obj = tmBH.add_objects();
            obj->set_hash(hash.data(), hash.size());
        }
        else
        {
            // Erase only if a peer has seen this tx. If the peer has not
            // seen this tx, then it could not have been queued for this
            // peer.
            removeTxQueue(hash);
        }
    }

    JLOG(p_journal_.trace())
        << "transaction request object is " << tmBH.objects_size();

    if (tmBH.objects_size() > 0)
        send(std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS));
}
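
handleHaveTransactions is the middle leg of the reduce-relay exchange: a peer advertises hashes with mtHAVE_TRANSACTIONS, this node replies with an mtGET_OBJECTS query for whatever is missing from its transaction cache, and the peer returns the full transactions in mtTRANSACTIONS. A toy sketch of that cache comparison, with plain structs standing in for the protobuf messages (all names illustrative, not rippled's API):

#include <cstdint>
#include <iostream>
#include <set>
#include <vector>

using Hash = std::uint64_t;  // stand-in for uint256

struct HaveTransactions  // stand-in for protocol::TMHaveTransactions
{
    std::vector<Hash> hashes;
};

struct GetObjects  // stand-in for protocol::TMGetObjectByHash
{
    std::vector<Hash> wanted;
};

// Compare advertised hashes against the local cache and request only the
// transactions we have not seen (mirrors the loop in handleHaveTransactions).
GetObjects
onHaveTransactions(HaveTransactions const& m, std::set<Hash> const& cache)
{
    GetObjects req;
    for (auto const& h : m.hashes)
        if (cache.count(h) == 0)
            req.wanted.push_back(h);
    return req;
}

int
main()
{
    std::set<Hash> cache{1, 2, 3};
    auto req = onHaveTransactions({{2, 3, 4, 5}}, cache);
    std::cout << "requesting " << req.wanted.size() << " txs\n";  // prints 2
}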

void
PeerImp::onMessage(std::shared_ptr<protocol::TMTransactions> const& m)
{
    if (!txReduceRelayEnabled())
    {
        JLOG(p_journal_.error())
            << "TMTransactions: tx reduce-relay is disabled";
        fee_ = Resource::feeInvalidRequest;
        return;
    }

    JLOG(p_journal_.trace())
        << "received TMTransactions " << m->transactions_size();

    overlay_.addTxMetrics(m->transactions_size());

    // Wrap each protobuf sub-message in a non-owning shared_ptr (no-op
    // deleter): handleTransaction() takes a shared_ptr, but the memory is
    // owned by the enclosing TMTransactions message and must not be freed.
    for (std::uint32_t i = 0; i < m->transactions_size(); ++i)
        handleTransaction(
            std::shared_ptr<protocol::TMTransaction>(
                m->mutable_transactions(i), [](protocol::TMTransaction*) {}),
            false);
}

void
PeerImp::onMessage(std::shared_ptr<protocol::TMSquelch> const& m)
{
@@ -2740,6 +2939,61 @@ PeerImp::doFetchPack(const std::shared_ptr<protocol::TMGetObjectByHash>& packet)
        });
}

void
PeerImp::doTransactions(
    std::shared_ptr<protocol::TMGetObjectByHash> const& packet)
{
    protocol::TMTransactions reply;

    JLOG(p_journal_.trace()) << "received TMGetObjectByHash requesting tx "
                             << packet->objects_size();

    if (packet->objects_size() > reduce_relay::MAX_TX_QUEUE_SIZE)
    {
        JLOG(p_journal_.error()) << "doTransactions, invalid number of hashes";
        fee_ = Resource::feeInvalidRequest;
        return;
    }

    for (std::uint32_t i = 0; i < packet->objects_size(); ++i)
    {
        auto const& obj = packet->objects(i);

        if (!stringIsUint256Sized(obj.hash()))
        {
            fee_ = Resource::feeInvalidRequest;
            return;
        }

        uint256 hash(obj.hash());

        auto txn = app_.getMasterTransaction().fetch_from_cache(hash);

        if (!txn)
        {
            JLOG(p_journal_.error()) << "doTransactions, transaction not found "
                                     << Slice(hash.data(), hash.size());
            fee_ = Resource::feeInvalidRequest;
            return;
        }

        Serializer s;
        auto tx = reply.add_transactions();
        auto sttx = txn->getSTransaction();
        sttx->add(s);
        tx->set_rawtransaction(s.data(), s.size());
        tx->set_status(
            txn->getStatus() == INCLUDED ? protocol::tsCURRENT
                                         : protocol::tsNEW);
        tx->set_receivetimestamp(
            app_.timeKeeper().now().time_since_epoch().count());
        tx->set_deferred(txn->getSubmitResult().queued);
    }

    if (reply.transactions_size() > 0)
        send(std::make_shared<Message>(reply, protocol::mtTRANSACTIONS));
}

void
PeerImp::checkTransaction(
    int flags,