mirror of
https://github.com/Xahau/xahaud.git
synced 2025-12-06 17:27:52 +00:00
Tuning and cleanups for ledger fetching
* Track stats for ledger fetch and output
* Reduce entries queried on timeout
* Allow duplicate node requests on timeout
* Don't query deep on timeout
* Adjust latency tuning
* Change high latency cutoff
* Set absolute limit on reply entries
* Small optimizations
This commit is contained in:
committed by
Nik Bougalis
parent
61e5359231
commit
fe89c74e3b
@@ -2157,7 +2157,9 @@ PeerImp::getLedger (std::shared_ptr<protocol::TMGetLedger> const& m)
|
||||
(std::min(packet.querydepth(), 3u)) :
|
||||
(isHighLatency() ? 2 : 1);
|
||||
|
||||
for (int i = 0; i < packet.nodeids ().size (); ++i)
|
||||
for (int i = 0;
|
||||
(i < packet.nodeids().size() &&
|
||||
(reply.nodes().size() < Tuning::maxReplyNodes)); ++i)
|
||||
{
|
||||
SHAMapNodeID mn (packet.nodeids (i).data (), packet.nodeids (i).size ());
|
||||
|
||||
@@ -2248,10 +2250,17 @@ PeerImp::getScore (bool haveItem) const
|
||||
|
||||
// Score for being very likely to have the thing we are
|
||||
// look for
|
||||
// Should be roughly spRandom
|
||||
static const int spHaveItem = 10000;
|
||||
|
||||
// Score reduction for each millisecond of latency
|
||||
static const int spLatency = 100;
|
||||
// Should be roughly spRandom divided by
|
||||
// the maximum reasonable latency
|
||||
static const int spLatency = 30;
|
||||
|
||||
// Penalty for unknown latency
|
||||
// Should be roughly spRandom
|
||||
static const int spNoLatency = 8000;
|
||||
|
||||
int score = rand() % spRandom;
|
||||
|
||||
@@ -2266,6 +2275,8 @@ PeerImp::getScore (bool haveItem) const
|
||||
}
|
||||
if (latency != std::chrono::milliseconds (-1))
|
||||
score -= latency.count() * spLatency;
|
||||
else
|
||||
score -= spNoLatency;
|
||||
|
||||
return score;
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user