Restrict source files to 80 columns.

Tom Ritchford
2015-08-07 18:17:10 -04:00
committed by Nik Bougalis
parent df728cd2cd
commit 96c13f0d98
26 changed files with 480 additions and 271 deletions
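The diffs below reuse a small set of wrapping patterns; the before/after pair here is copied from the LedgerMaster and ConsensusTransSetSF changes further down, purely as a quick illustration of the style.

// Over 80 columns (before):
virtual void setFullLedger (Ledger::pointer ledger, bool isSynchronous, bool isCurrent) = 0;

// Wrapped (after): break after the opening parenthesis and indent the
// continuation.
virtual void setFullLedger (
    Ledger::pointer ledger, bool isSynchronous, bool isCurrent) = 0;

// Long stream-insertion chains are broken before the << operator:
WriteLog (lsDEBUG, TransactionAcquire)
    << "Node on our acquiring TX set is TXN we may not have";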

View File

@@ -45,12 +45,13 @@ class AcceptedLedger
public:
using pointer = std::shared_ptr<AcceptedLedger>;
using ret = const pointer&;
using map_t = std::map<int, AcceptedLedgerTx::pointer>; // Must be an ordered map!
using map_t = std::map<int, AcceptedLedgerTx::pointer>;
// map_t must be an ordered map!
using value_type = map_t::value_type;
using const_iterator = map_t::const_iterator;
public:
static pointer makeAcceptedLedger (std::shared_ptr<ReadView const> const& ledger);
static pointer makeAcceptedLedger (std::shared_ptr<ReadView const> const&);
static void sweep ()
{
s_cache.sweep ();
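The ordered-map requirement noted above matters because std::map iterates in ascending key order, so walking map_t visits the accepted transactions in key order (presumably their position within the ledger). A minimal, self-contained illustration, not code from this commit:

#include <iostream>
#include <map>

int main ()
{
    std::map<int, char const*> txs;   // stand-in for map_t
    txs[2] = "third";
    txs[0] = "first";
    txs[1] = "second";
    for (auto const& entry : txs)     // visits keys 0, 1, 2 in order
        std::cout << entry.first << ": " << entry.second << "\n";
}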

View File

@@ -55,7 +55,7 @@ public:
std::shared_ptr<ReadView const> const& ledger,
std::shared_ptr<STTx const> const&,
std::shared_ptr<STObject const> const&);
AcceptedLedgerTx (std::shared_ptr<ReadView const> const& ledger, STTx::ref, TER result);
AcceptedLedgerTx (std::shared_ptr<ReadView const> const&, STTx::ref, TER);
std::shared_ptr <STTx const> const& getTxn () const
{

View File

@@ -35,8 +35,9 @@ ConsensusTransSetSF::ConsensusTransSetSF (NodeCache& nodeCache)
{
}
void ConsensusTransSetSF::gotNode (bool fromFilter, const SHAMapNodeID& id, uint256 const& nodeHash,
Blob& nodeData, SHAMapTreeNode::TNType type)
void ConsensusTransSetSF::gotNode (
bool fromFilter, const SHAMapNodeID& id, uint256 const& nodeHash,
Blob& nodeData, SHAMapTreeNode::TNType type)
{
if (fromFilter)
return;
@@ -46,7 +47,8 @@ void ConsensusTransSetSF::gotNode (bool fromFilter, const SHAMapNodeID& id, uint
if ((type == SHAMapTreeNode::tnTRANSACTION_NM) && (nodeData.size () > 16))
{
// this is a transaction, and we didn't have it
WriteLog (lsDEBUG, TransactionAcquire) << "Node on our acquiring TX set is TXN we may not have";
WriteLog (lsDEBUG, TransactionAcquire)
<< "Node on our acquiring TX set is TXN we may not have";
try
{
@@ -62,24 +64,26 @@ void ConsensusTransSetSF::gotNode (bool fromFilter, const SHAMapNodeID& id, uint
}
catch (...)
{
WriteLog (lsWARNING, TransactionAcquire) << "Fetched invalid transaction in proposed set";
WriteLog (lsWARNING, TransactionAcquire)
<< "Fetched invalid transaction in proposed set";
}
}
}
bool ConsensusTransSetSF::haveNode (const SHAMapNodeID& id, uint256 const& nodeHash,
Blob& nodeData)
bool ConsensusTransSetSF::haveNode (
const SHAMapNodeID& id, uint256 const& nodeHash, Blob& nodeData)
{
if (m_nodeCache.retrieve (nodeHash, nodeData))
return true;
// VFALCO TODO Use a dependency injection here
Transaction::pointer txn = getApp().getMasterTransaction().fetch(nodeHash, false);
auto txn = getApp().getMasterTransaction().fetch(nodeHash, false);
if (txn)
{
// this is a transaction, and we have it
WriteLog (lsTRACE, TransactionAcquire) << "Node in our acquiring TX set is TXN we have";
WriteLog (lsTRACE, TransactionAcquire)
<< "Node in our acquiring TX set is TXN we have";
Serializer s;
s.add32 (HashPrefix::transactionID);
txn->getSTransaction ()->add (s);
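The Serializer calls above show the "prefix format" that the fetch-pack code below also refers to: the serialized object is preceded by a 4-byte type tag (here HashPrefix::transactionID) before it is hashed or handed back. A hypothetical standalone helper sketching that framing, not code from this commit:

#include <cstdint>
#include <vector>

std::vector<std::uint8_t>
prefixFormat (std::uint32_t prefix, std::vector<std::uint8_t> const& payload)
{
    std::vector<std::uint8_t> out;
    out.reserve (4 + payload.size ());
    for (int shift = 24; shift >= 0; shift -= 8)   // big-endian type tag
        out.push_back (static_cast<std::uint8_t> ((prefix >> shift) & 0xff));
    out.insert (out.end (), payload.begin (), payload.end ());
    return out;
}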

View File

@@ -37,7 +37,8 @@ public:
static char const* getCountedObjectName () { return "InboundLedger"; }
using pointer = std::shared_ptr <InboundLedger>;
using PeerDataPairType = std::pair < std::weak_ptr<Peer>, std::shared_ptr<protocol::TMLedgerData> >;
using PeerDataPairType = std::pair<std::weak_ptr<Peer>,
std::shared_ptr<protocol::TMLedgerData>>;
// These are the reasons we might acquire a ledger
enum fcReason
@@ -50,7 +51,8 @@ public:
};
public:
InboundLedger (uint256 const& hash, std::uint32_t seq, fcReason reason, clock_type& clock);
InboundLedger(
uint256 const& hash, std::uint32_t seq, fcReason reason, clock_type&);
~InboundLedger ();
@@ -97,12 +99,14 @@ public:
bool gotData (std::weak_ptr<Peer>, std::shared_ptr<protocol::TMLedgerData>);
using neededHash_t = std::pair <protocol::TMGetObjectByHash::ObjectType, uint256>;
using neededHash_t =
std::pair <protocol::TMGetObjectByHash::ObjectType, uint256>;
std::vector<neededHash_t> getNeededHashes ();
// VFALCO TODO Replace uint256 with something semantically meaningful
void filterNodes (std::vector<SHAMapNodeID>& nodeIDs, std::vector<uint256>& nodeHashes,
void filterNodes (
std::vector<SHAMapNodeID>& nodeIDs, std::vector<uint256>& nodeHashes,
int max, bool aggressive);
/** Return a Json::objectValue. */
@@ -127,7 +131,8 @@ private:
int processData (std::shared_ptr<Peer> peer, protocol::TMLedgerData& data);
bool takeHeader (std::string const& data);
bool takeTxNode (const std::vector<SHAMapNodeID>& IDs, const std::vector<Blob>& data,
bool takeTxNode (const std::vector<SHAMapNodeID>& IDs,
const std::vector<Blob>& data,
SHAMapAddNode&);
bool takeTxRootNode (Blob const& data, SHAMapAddNode&);
@@ -135,7 +140,8 @@ private:
// Don't use acronyms, but if we are going to use them at least
// capitalize them correctly.
//
bool takeAsNode (const std::vector<SHAMapNodeID>& IDs, const std::vector<Blob>& data,
bool takeAsNode (const std::vector<SHAMapNodeID>& IDs,
const std::vector<Blob>& data,
SHAMapAddNode&);
bool takeAsRootNode (Blob const& data, SHAMapAddNode&);

View File

@@ -84,8 +84,9 @@ public:
};
std::unique_ptr<InboundLedgers>
make_InboundLedgers (InboundLedgers::clock_type& clock, beast::Stoppable& parent,
beast::insight::Collector::ptr const& collector);
make_InboundLedgers (
InboundLedgers::clock_type& clock, beast::Stoppable& parent,
beast::insight::Collector::ptr const& collector);
} // ripple

View File

@@ -276,11 +276,16 @@ Ledger::Ledger (open_ledger_t, Ledger const& prevLedger)
getCloseAgree(prevLedger.info()), info_.seq);
// VFALCO Remove this call to getApp
if (prevLedger.info_.closeTime == 0)
{
info_.closeTime = roundCloseTime (
getApp().timeKeeper().closeTime().time_since_epoch().count(), info_.closeTimeResolution);
getApp().timeKeeper().closeTime().time_since_epoch().count(),
info_.closeTimeResolution);
}
else
{
info_.closeTime =
prevLedger.info_.closeTime + info_.closeTimeResolution;
}
}
Ledger::Ledger (void const* data,
@@ -409,7 +414,8 @@ void Ledger::setAccepted ()
{
// used when we acquired the ledger
// TODO: re-enable a test like the following:
// assert(closed() && (info_.closeTime != 0) && (info_.closeTimeResolution != 0));
// assert(closed() && (info_.closeTime != 0) &&
// (info_.closeTimeResolution != 0));
if ((info_.closeFlags & sLCF_NoConsensusTime) == 0)
info_.closeTime = roundCloseTime(
info_.closeTime, info_.closeTimeResolution);
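roundCloseTime is used here and throughout the consensus code below to snap a close time to the current close-time resolution. Its body is not shown in this diff; the helper below is only a sketch under the assumption that it rounds to the nearest multiple of the resolution.

#include <cstdint>

// Assumed behaviour, for illustration only.
std::uint32_t
roundToNearestMultiple (std::uint32_t closeTime, std::uint32_t resolution)
{
    closeTime += resolution / 2;
    return closeTime - (closeTime % resolution);
}
// roundToNearestMultiple (1007, 10) == 1010
// roundToNearestMultiple (1003, 10) == 1000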

View File

@@ -56,7 +56,8 @@ bool LedgerHistory::addLedger (Ledger::pointer ledger, bool validated)
LedgersByHash::ScopedLockType sl (m_ledgers_by_hash.peekMutex ());
const bool alreadyHad = m_ledgers_by_hash.canonicalize (ledger->getHash(), ledger, true);
const bool alreadyHad = m_ledgers_by_hash.canonicalize (
ledger->getHash(), ledger, true);
if (validated)
mLedgersByIndex[ledger->info().seq] = ledger->getHash();
@@ -66,7 +67,7 @@ bool LedgerHistory::addLedger (Ledger::pointer ledger, bool validated)
LedgerHash LedgerHistory::getLedgerHash (LedgerIndex index)
{
LedgersByHash::ScopedLockType sl (m_ledgers_by_hash.peekMutex ());
std::map<std::uint32_t, uint256>::iterator it (mLedgersByIndex.find (index));
auto it = mLedgersByIndex.find (index);
if (it != mLedgersByIndex.end ())
return it->second;
@@ -78,7 +79,7 @@ Ledger::pointer LedgerHistory::getLedgerBySeq (LedgerIndex index)
{
{
LedgersByHash::ScopedLockType sl (m_ledgers_by_hash.peekMutex ());
std::map <std::uint32_t, uint256>::iterator it (mLedgersByIndex.find (index));
auto it = mLedgersByIndex.find (index);
if (it != mLedgersByIndex.end ())
{
@@ -151,8 +152,8 @@ log_one(Ledger::pointer ledger, uint256 const& tx, char const* msg)
static
void
log_metadata_difference(Ledger::pointer builtLedger, Ledger::pointer validLedger,
uint256 const& tx)
log_metadata_difference(
Ledger::pointer builtLedger, Ledger::pointer validLedger, uint256 const& tx)
{
auto getMeta = [](Ledger const& ledger,
uint256 const& txID) -> std::shared_ptr<TxMeta>
@@ -294,7 +295,8 @@ leaves (SHAMap const& sm)
}
void LedgerHistory::handleMismatch (LedgerHash const& built, LedgerHash const& valid)
void LedgerHistory::handleMismatch (
LedgerHash const& built, LedgerHash const& valid)
{
assert (built != valid);
++mismatch_counter_;
@@ -409,7 +411,7 @@ void LedgerHistory::validatedLedger (Ledger::ref ledger)
ConsensusValidated::ScopedLockType sl (
m_consensus_validated.peekMutex());
std::shared_ptr< std::pair< LedgerHash, LedgerHash > > entry = std::make_shared<std::pair< LedgerHash, LedgerHash >>();
auto entry = std::make_shared<std::pair<LedgerHash, LedgerHash>>();
m_consensus_validated.canonicalize(index, entry, false);
if (entry->second != hash)
@@ -428,10 +430,11 @@ void LedgerHistory::validatedLedger (Ledger::ref ledger)
/** Ensure m_ledgers_by_hash doesn't have the wrong hash for a particular index
*/
bool LedgerHistory::fixIndex (LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
bool LedgerHistory::fixIndex (
LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
{
LedgersByHash::ScopedLockType sl (m_ledgers_by_hash.peekMutex ());
std::map<std::uint32_t, uint256>::iterator it (mLedgersByIndex.find (ledgerIndex));
auto it = mLedgersByIndex.find (ledgerIndex);
if ((it != mLedgersByIndex.end ()) && (it->second != ledgerHash) )
{

View File

@@ -81,7 +81,8 @@ public:
// The validated ledger is the last fully validated ledger
virtual Ledger::pointer getValidatedLedger () = 0;
// This is the last ledger we published to clients and can lag the validated ledger
// This is the last ledger we published to clients and can lag the validated
// ledger
virtual Ledger::ref getPublishedLedger () = 0;
virtual bool isValidLedger(LedgerInfo const&) = 0;
@@ -101,9 +102,11 @@ public:
virtual bool storeLedger (Ledger::pointer) = 0;
virtual void forceValid (Ledger::pointer) = 0;
virtual void setFullLedger (Ledger::pointer ledger, bool isSynchronous, bool isCurrent) = 0;
virtual void setFullLedger (
Ledger::pointer ledger, bool isSynchronous, bool isCurrent) = 0;
virtual void switchLedgers (Ledger::pointer lastClosed, Ledger::pointer newCurrent) = 0;
virtual void switchLedgers (
Ledger::pointer lastClosed, Ledger::pointer newCurrent) = 0;
virtual void failedSave(std::uint32_t seq, uint256 const& hash) = 0;
@@ -118,23 +121,28 @@ public:
/** Walk to a ledger's hash using the skip list
*/
virtual uint256 walkHashBySeq (std::uint32_t index) = 0;
virtual uint256 walkHashBySeq (std::uint32_t index, Ledger::ref referenceLedger) = 0;
virtual uint256 walkHashBySeq (
std::uint32_t index, Ledger::ref referenceLedger) = 0;
virtual Ledger::pointer getLedgerBySeq (std::uint32_t index) = 0;
virtual Ledger::pointer getLedgerByHash (uint256 const& hash) = 0;
virtual void setLedgerRangePresent (std::uint32_t minV, std::uint32_t maxV) = 0;
virtual void setLedgerRangePresent (
std::uint32_t minV, std::uint32_t maxV) = 0;
virtual uint256 getLedgerHash(std::uint32_t desiredSeq, Ledger::ref knownGoodLedger) = 0;
virtual uint256 getLedgerHash(
std::uint32_t desiredSeq, Ledger::ref knownGoodLedger) = 0;
virtual void addHeldTransaction (Transaction::ref trans) = 0;
virtual void fixMismatch (Ledger::ref ledger) = 0;
virtual bool haveLedger (std::uint32_t seq) = 0;
virtual void clearLedger (std::uint32_t seq) = 0;
virtual bool getValidatedRange (std::uint32_t& minVal, std::uint32_t& maxVal) = 0;
virtual bool getFullValidatedRange (std::uint32_t& minVal, std::uint32_t& maxVal) = 0;
virtual bool getValidatedRange (
std::uint32_t& minVal, std::uint32_t& maxVal) = 0;
virtual bool getFullValidatedRange (
std::uint32_t& minVal, std::uint32_t& maxVal) = 0;
virtual void tune (int size, int age) = 0;
virtual void sweep () = 0;
@@ -152,7 +160,8 @@ public:
virtual bool isNewPathRequest () = 0;
virtual void newOrderBookDB () = 0;
virtual bool fixIndex (LedgerIndex ledgerIndex, LedgerHash const& ledgerHash) = 0;
virtual bool fixIndex (
LedgerIndex ledgerIndex, LedgerHash const& ledgerHash) = 0;
virtual void doLedgerCleaner(Json::Value const& parameters) = 0;
virtual beast::PropertyStream::Source& getPropertySource () = 0;

View File

@@ -115,7 +115,8 @@ public:
return mTime <= cutoff;
}
bool changePosition (uint256 const& newPosition, std::uint32_t newCloseTime);
bool changePosition (
uint256 const& newPosition, std::uint32_t newCloseTime);
void bowOut ();
Json::Value getJson () const;

View File

@@ -93,10 +93,14 @@ void OrderBookDB::update(
sle->getFieldH256 (sfRootIndex) == sle->getIndex())
{
Book book;
book.in.currency.copyFrom (sle->getFieldH160 (sfTakerPaysCurrency));
book.in.account.copyFrom (sle->getFieldH160 (sfTakerPaysIssuer));
book.out.account.copyFrom (sle->getFieldH160 (sfTakerGetsIssuer));
book.out.currency.copyFrom (sle->getFieldH160 (sfTakerGetsCurrency));
book.in.currency.copyFrom(sle->getFieldH160(
sfTakerPaysCurrency));
book.in.account.copyFrom(sle->getFieldH160 (
sfTakerPaysIssuer));
book.out.account.copyFrom(sle->getFieldH160(
sfTakerGetsIssuer));
book.out.currency.copyFrom (sle->getFieldH160(
sfTakerGetsCurrency));
uint256 index = getBookBase (book);
if (seen.insert (index).second)

View File

@@ -42,8 +42,8 @@ public:
*/
OrderBook::List getBooksByTakerPays (Issue const&);
/** @return a count of all orderbooks that want this issuerID and currencyID.
*/
/** @return a count of all orderbooks that want this issuerID and
currencyID. */
int getBookSize(Issue const&);
bool isBookToXRP (Issue const&);

View File

@@ -58,8 +58,8 @@ void DisputedTx::setVote (NodeID const& peer, bool votesYes)
// changes vote to no
else if (!votesYes && res.first->second)
{
WriteLog (lsDEBUG, LedgerConsensus) << "Peer " << peer
<< " now votes NO on " << mTransactionID;
WriteLog (lsDEBUG, LedgerConsensus)
<< "Peer " << peer << " now votes NO on " << mTransactionID;
++mNays;
--mYays;
res.first->second = false;

View File

@@ -71,8 +71,8 @@ public:
mOurVote = o;
}
// VFALCO NOTE its not really a peer, its the 160 bit hash of the validator's public key
//
// VFALCO NOTE it's not really a peer, it's the 160-bit hash of the
// validator's public key.
void setVote (NodeID const& peer, bool votesYes);
void unVote (NodeID const& peer);

View File

@@ -57,8 +57,8 @@ enum
,fetchSmallNodes = 32
};
InboundLedger::InboundLedger (uint256 const& hash, std::uint32_t seq, fcReason reason,
clock_type& clock)
InboundLedger::InboundLedger (
uint256 const& hash, std::uint32_t seq, fcReason reason, clock_type& clock)
: PeerSet (hash, ledgerAcquireTimeoutMillis, false, clock,
deprecatedLogs().journal("InboundLedger"))
, mHaveHeader (false)
@@ -135,8 +135,12 @@ void InboundLedger::init (ScopedLockType& collectionLock)
getApp ().getLedgerMaster ().storeLedger (mLedger);
// Check if this could be a newer fully-validated ledger
if ((mReason == fcVALIDATION) || (mReason == fcCURRENT) || (mReason == fcCONSENSUS))
if (mReason == fcVALIDATION ||
mReason == fcCURRENT ||
mReason == fcCONSENSUS)
{
getApp ().getLedgerMaster ().checkAccept (mLedger);
}
}
}
@@ -150,7 +154,7 @@ bool InboundLedger::tryLocal ()
if (!mHaveHeader)
{
// Nothing we can do without the ledger header
std::shared_ptr<NodeObject> node = getApp().getNodeStore ().fetch (mHash);
auto node = getApp().getNodeStore ().fetch (mHash);
if (!node)
{
@@ -169,7 +173,8 @@ bool InboundLedger::tryLocal ()
else
{
mLedger = std::make_shared<Ledger>(
node->getData().data(), node->getData().size(), true, getConfig());
node->getData().data(), node->getData().size(),
true, getConfig());
}
if (mLedger->getHash () != mHash)
@@ -466,8 +471,8 @@ void InboundLedger::trigger (Peer::ptr const& peer)
Message::pointer packet (std::make_shared <Message> (
tmBH, protocol::mtGET_OBJECTS));
{
for (PeerSetMap::iterator it = mPeers.begin (), end = mPeers.end ();
it != end; ++it)
for (auto it = mPeers.begin (), end = mPeers.end ();
it != end; ++it)
{
Peer::ptr iPeer (
getApp().overlay ().findPeerByShortID (it->first));
@@ -499,8 +504,9 @@ void InboundLedger::trigger (Peer::ptr const& peer)
if (!mHaveHeader && !mFailed)
{
tmGL.set_itype (protocol::liBASE);
if (m_journal.trace) m_journal.trace <<
"Sending header request to " << (peer ? "selected peer" : "all peers");
if (m_journal.trace) m_journal.trace
<< "Sending header request to "
<< (peer ? "selected peer" : "all peers");
sendRequest (tmGL, peer);
return;
}
@@ -529,8 +535,9 @@ void InboundLedger::trigger (Peer::ptr const& peer)
// we need the root node
tmGL.set_itype (protocol::liAS_NODE);
*tmGL.add_nodeids () = SHAMapNodeID ().getRawString ();
if (m_journal.trace) m_journal.trace <<
"Sending AS root request to " << (peer ? "selected peer" : "all peers");
if (m_journal.trace) m_journal.trace
<< "Sending AS root request to "
<< (peer ? "selected peer" : "all peers");
sendRequest (tmGL, peer);
return;
}
@@ -587,8 +594,8 @@ void InboundLedger::trigger (Peer::ptr const& peer)
"Sending AS node " << nodeIDs.size () <<
" request to " << (
peer ? "selected peer" : "all peers");
if (nodeIDs.size () == 1 && m_journal.trace) m_journal.trace <<
"AS node: " << nodeIDs[0];
if (nodeIDs.size () == 1 && m_journal.trace)
m_journal.trace << "AS node: " << nodeIDs[0];
sendRequest (tmGL, peer);
return;
}

View File

@@ -56,7 +56,8 @@ public:
{
}
Ledger::pointer acquire (uint256 const& hash, std::uint32_t seq, InboundLedger::fcReason reason)
Ledger::pointer acquire (
uint256 const& hash, std::uint32_t seq, InboundLedger::fcReason reason)
{
assert (hash.isNonZero ());
bool isNew = true;
@@ -99,7 +100,8 @@ public:
{
ScopedLockType sl (mLock);
hash_map<uint256, InboundLedger::pointer>::iterator it = mLedgers.find (hash);
auto it = mLedgers.find (hash);
if (it != mLedgers.end ())
{
ret = it->second;
@@ -147,15 +149,19 @@ public:
{
protocol::TMLedgerData& packet = *packet_ptr;
WriteLog (lsTRACE, InboundLedger) << "Got data (" << packet.nodes ().size () << ") for acquiring ledger: " << hash;
WriteLog (lsTRACE, InboundLedger)
<< "Got data (" << packet.nodes ().size ()
<< ") for acquiring ledger: " << hash;
InboundLedger::pointer ledger = find (hash);
if (!ledger)
{
WriteLog (lsTRACE, InboundLedger) << "Got data for ledger we're no longer acquiring";
WriteLog (lsTRACE, InboundLedger)
<< "Got data for ledger we're no longer acquiring";
// If it's state node data, stash it because it still might be useful
// If it's state node data, stash it because it still might be
// useful.
if (packet.type () == protocol::liAS_NODE)
{
getApp().getJobQueue().addJob(jtLEDGER_DATA, "gotStaleData",
@@ -225,9 +231,11 @@ public:
ledger->runData ();
}
/** We got some data for a ledger we are no longer acquiring
Since we paid the price to receive it, we might as well stash it in case we need it.
Nodes are received in wire format and must be stashed/hashed in prefix format
/** We got some data for a ledger we are no longer acquiring. Since we paid
the price to receive it, we might as well stash it in case we need it.
Nodes are received in wire format and must be stashed/hashed in prefix
format.
*/
void gotStaleData (std::shared_ptr<protocol::TMLedgerData> packet_ptr)
{
@@ -254,7 +262,8 @@ public:
auto blob = std::make_shared<Blob> (s.begin(), s.end());
getApp().getLedgerMaster().addFetchPack (newNode->getNodeHash(), blob);
getApp().getLedgerMaster().addFetchPack(
newNode->getNodeHash(), blob);
}
}
catch (...)
@@ -317,12 +326,12 @@ public:
// getJson is expensive, so call without the lock
std::uint32_t seq = it.second->getSeq();
if (seq > 1)
ret[beast::lexicalCastThrow <std::string>(seq)] = it.second->getJson(0);
ret[std::to_string(seq)] = it.second->getJson(0);
else
ret[to_string (it.first)] = it.second->getJson(0);
}
return ret;
return ret;
}
void gotFetchPack (Job&)
@@ -365,7 +374,8 @@ public:
it->second->touch ();
++it;
}
else if ((it->second->getLastAction () + std::chrono::minutes (1)) < now)
else if ((it->second->getLastAction () +
std::chrono::minutes (1)) < now)
{
stuffToSweep.push_back (it->second);
// shouldn't cause the actual final delete
@@ -414,15 +424,17 @@ private:
//------------------------------------------------------------------------------
decltype(InboundLedgersImp::kReacquireInterval) InboundLedgersImp::kReacquireInterval{5};
decltype(InboundLedgersImp::kReacquireInterval)
InboundLedgersImp::kReacquireInterval{5};
InboundLedgers::~InboundLedgers()
{
}
std::unique_ptr<InboundLedgers>
make_InboundLedgers (InboundLedgers::clock_type& clock, beast::Stoppable& parent,
beast::insight::Collector::ptr const& collector)
make_InboundLedgers (
InboundLedgers::clock_type& clock, beast::Stoppable& parent,
beast::insight::Collector::ptr const& collector)
{
return std::make_unique<InboundLedgersImp> (clock, parent, collector);
}

View File

@@ -63,11 +63,20 @@ public:
{
}
LedgerIndex minRange; // The lowest ledger in the range we're checking
LedgerIndex maxRange; // The highest ledger in the range we're checking
bool checkNodes; // Check all state/transaction nodes
bool fixTxns; // Rewrite SQL databases
int failures; // Number of errors encountered since last success
// The lowest ledger in the range we're checking.
LedgerIndex minRange;
// The highest ledger in the range we're checking
LedgerIndex maxRange;
// Check all state/transaction nodes
bool checkNodes;
// Rewrite SQL databases
bool fixTxns;
// Number of errors encountered since last success
int failures;
};
using SharedState = beast::SharedData <State>;
@@ -174,7 +183,7 @@ public:
ledger numbers to clean. If unspecified, clean all ledgers.
"full"
A boolean. When set to true, means clean everything possible.
A boolean. When true, means clean everything possible.
"fix_txns"
A boolean value indicating whether or not to fix the
@@ -184,7 +193,7 @@ public:
A boolean, when set to true means check the nodes.
"stop"
A boolean, when set to true informs the cleaner to gracefully
A boolean, when true informs the cleaner to gracefully
stop its current activities if any cleaning is taking place.
*/
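Putting the documented knobs together, a caller might build the parameter object like this. The sketch uses only the names documented above plus the doLedgerCleaner(Json::Value const&) entry point declared in LedgerMaster; it is not code from this commit.

Json::Value params (Json::objectValue);
params["check_nodes"] = true;    // check the state/transaction nodes
params["fix_txns"]    = true;    // rewrite the SQL databases
params["full"]        = false;   // don't clean everything possible
// params["stop"]    = true;     // would ask a running cleaner to wind down
getApp ().getLedgerMaster ().doLedgerCleaner (params);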
@@ -261,7 +270,8 @@ public:
m_journal.warning <<
"Node missing from ledger " << ledger->info().seq;
getApp().getInboundLedgers().acquire (
ledger->getHash(), ledger->info().seq, InboundLedger::fcGENERIC);
ledger->getHash(), ledger->info().seq,
InboundLedger::fcGENERIC);
}
return hash ? *hash : zero; // kludge
}
@@ -301,7 +311,8 @@ public:
if(! getApp().getLedgerMaster().fixIndex(ledgerIndex, ledgerHash))
{
m_journal.debug << "ledger " << ledgerIndex << " had wrong entry in history";
m_journal.debug << "ledger " << ledgerIndex
<< " had wrong entry in history";
doTxns = true;
}
@@ -349,8 +360,9 @@ public:
ledgerHash = getLedgerHash(referenceLedger, ledgerIndex);
if (ledgerHash.isZero())
{
// No, Try to get another ledger that might have the hash we need
// Compute the index and hash of a ledger that will have the hash we need
// No. Try to get another ledger that might have the hash we
// need: compute the index and hash of a ledger that will have
// the hash we need.
LedgerIndex refIndex = getCandidateLedger (ledgerIndex);
LedgerHash refHash = getLedgerHash (referenceLedger, refIndex);
@@ -358,12 +370,14 @@ public:
assert (nonzero);
if (nonzero)
{
// We found the hash and sequence of a better reference ledger
// We found the hash and sequence of a better reference
// ledger.
referenceLedger =
getApp().getInboundLedgers().acquire(
refHash, refIndex, InboundLedger::fcGENERIC);
if (referenceLedger)
ledgerHash = getLedgerHash(referenceLedger, ledgerIndex);
ledgerHash = getLedgerHash(
referenceLedger, ledgerIndex);
}
}
}
@@ -411,7 +425,8 @@ public:
bool fail = false;
if (ledgerHash.isZero())
{
m_journal.info << "Unable to get hash for ledger " << ledgerIndex;
m_journal.info << "Unable to get hash for ledger "
<< ledgerIndex;
fail = true;
}
else if (!doLedger(ledgerIndex, ledgerHash, doNodes, doTxns))

View File

@@ -58,11 +58,14 @@ namespace ripple {
@param anyTransactions indicates whether any transactions have been received
@param previousProposers proposers in the last closing
@param proposersClosed proposers who have currently closed this ledger
@param proposersValidated proposers who have validated the last closed ledger
@param proposersValidated proposers who have validated the last closed
ledger
@param previousMSeconds time, in milliseconds, for the previous ledger to
reach consensus
@param currentMSeconds time, in milliseconds since the previous ledger closed
@param openMSeconds time, in milliseconds, since the previous LCL was computed
@param currentMSeconds time, in milliseconds since the previous ledger
closed
@param openMSeconds time, in milliseconds, since the previous LCL was
computed
@param idleInterval the network's desired idle interval
*/
bool shouldCloseLedger (
@@ -81,7 +84,8 @@ bool shouldCloseLedger (
WriteLog (lsWARNING, LedgerTiming) <<
"shouldCloseLedger Trans=" << (anyTransactions ? "yes" : "no") <<
" Prop: " << previousProposers << "/" << proposersClosed <<
" Secs: " << currentMSeconds << " (last: " << previousMSeconds << ")";
" Secs: " << currentMSeconds << " (last: " <<
previousMSeconds << ")";
return true;
}
@@ -148,7 +152,8 @@ enum class ConsensusState
@param currentFinished proposers who have validated a ledger after this one
@param previousAgreeTime how long, in milliseconds, it took to agree on the
last ledger
@param currentAgreeTime how long, in milliseconds, we've been trying to agree
@param currentAgreeTime how long, in milliseconds, we've been trying to
agree
*/
ConsensusState checkConsensus (
int previousProposers,
@@ -284,7 +289,9 @@ LedgerConsensusImp::LedgerConsensusImp (
<< "Correct LCL is: " << prevLCLHash;
}
}
else // update the network status table as to whether we're proposing/validating
else
// update the network status table as to whether we're
// proposing/validating
consensus_.setProposing (mProposing, mValidating);
}
@@ -381,7 +388,8 @@ Json::Value LedgerConsensusImp::getJson (bool full)
Json::Value ctj (Json::objectValue);
for (auto& ct : mCloseTimes)
{
ctj[beast::lexicalCastThrow <std::string> (ct.first)] = ct.second;
ctj[std::to_string(ct.first)] = ct.second;
}
ret["close_times"] = ctj;
}
@@ -594,7 +602,8 @@ void LedgerConsensusImp::checkLCL ()
void LedgerConsensusImp::handleLCL (uint256 const& lclHash)
{
assert ((lclHash != mPrevLedgerHash) || (mPreviousLedger->getHash () != lclHash));
assert (lclHash != mPrevLedgerHash ||
mPreviousLedger->getHash () != lclHash);
if (mPrevLedgerHash != lclHash)
{
@@ -665,7 +674,7 @@ void LedgerConsensusImp::timerEntry ()
if ((state_ != State::finished) && (state_ != State::accepted))
checkLCL ();
mCurrentMSeconds = std::chrono::duration_cast <std::chrono::milliseconds>
mCurrentMSeconds = std::chrono::duration_cast<std::chrono::milliseconds>
(std::chrono::steady_clock::now() - mConsensusStartTime).count ();
mClosePercent = mCurrentMSeconds * 100 / mPreviousMSeconds;
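A quick worked example of the percentage computed above, with hypothetical round times:

int const previousMSeconds = 4000;  // the last round took 4 seconds
int const currentMSeconds  = 3000;  // 3 seconds have elapsed in this round
int const closePercent = currentMSeconds * 100 / previousMSeconds;  // == 75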
@@ -722,7 +731,8 @@ void LedgerConsensusImp::statePreClose ()
if (mHaveCorrectLCL && getCloseAgree(mPreviousLedger->info()))
{
// we can use consensus timing
sinceClose = 1000 * (getApp().timeKeeper().closeTime().time_since_epoch().count()
sinceClose = 1000 * (
getApp().timeKeeper().closeTime().time_since_epoch().count()
- mPreviousLedger->info().closeTime);
idleInterval = 2 * mPreviousLedger->info().closeTimeResolution;
@@ -732,13 +742,15 @@ void LedgerConsensusImp::statePreClose ()
else
{
// Use the time we saw the last ledger close
sinceClose = 1000 * (getApp().timeKeeper().closeTime().time_since_epoch().count()
sinceClose = 1000 * (
getApp().timeKeeper().closeTime().time_since_epoch().count()
- consensus_.getLastCloseTime ());
idleInterval = LEDGER_IDLE_INTERVAL;
}
idleInterval = std::max (idleInterval, LEDGER_IDLE_INTERVAL);
idleInterval = std::max (idleInterval, 2 * mPreviousLedger->info().closeTimeResolution);
idleInterval = std::max (
idleInterval, 2 * mPreviousLedger->info().closeTimeResolution);
// Decide if we should close the ledger
if (shouldCloseLedger (anyTransactions
@@ -849,7 +861,8 @@ bool LedgerConsensusImp::haveConsensus ()
return true;
}
std::shared_ptr<SHAMap> LedgerConsensusImp::getTransactionTree (uint256 const& hash)
std::shared_ptr<SHAMap> LedgerConsensusImp::getTransactionTree (
uint256 const& hash)
{
auto it = mAcquired.find (hash);
if (it != mAcquired.end() && it->second)
@@ -1014,7 +1027,8 @@ void LedgerConsensusImp::accept (std::shared_ptr<SHAMap> set)
hotACCOUNT_NODE, newLCL->info().seq);
int tmf = newLCL->txMap().flushDirty (
hotTRANSACTION_NODE, newLCL->info().seq);
WriteLog (lsDEBUG, LedgerConsensus) << "Flushed " << asf << " accounts and " <<
WriteLog (lsDEBUG, LedgerConsensus) << "Flushed " <<
asf << " accounts and " <<
tmf << " transaction nodes";
// Accept ledger
@@ -1050,7 +1064,8 @@ void LedgerConsensusImp::accept (std::shared_ptr<SHAMap> set)
{
// Build validation
auto v = std::make_shared<STValidation> (newLCLHash,
consensus_.validationTimestamp (getApp().timeKeeper().now().time_since_epoch().count()),
consensus_.validationTimestamp (
getApp().timeKeeper().now().time_since_epoch().count()),
mValPublic, mProposing);
v->setFieldU32 (sfLedgerSequence, newLCL->info().seq);
addLoad(v); // Our network load
@@ -1140,8 +1155,10 @@ void LedgerConsensusImp::accept (std::shared_ptr<SHAMap> set)
}
{
auto lock = beast::make_lock(getApp().getMasterMutex(), std::defer_lock);
LedgerMaster::ScopedLockType sl (ledgerMaster_.peekMutex (), std::defer_lock);
auto lock = beast::make_lock(
getApp().getMasterMutex(), std::defer_lock);
LedgerMaster::ScopedLockType sl (
ledgerMaster_.peekMutex (), std::defer_lock);
std::lock(lock, sl);
auto const localTx = m_localTX.getTxSet();
@@ -1299,7 +1316,8 @@ void LedgerConsensusImp::addDisputedTransaction (
protocol::TMTransaction msg;
msg.set_rawtransaction (& (tx.front ()), tx.size ());
msg.set_status (protocol::tsNEW);
msg.set_receivetimestamp (getApp().timeKeeper().now().time_since_epoch().count());
msg.set_receivetimestamp (
getApp().timeKeeper().now().time_since_epoch().count());
getApp ().overlay ().foreach (send_always (
std::make_shared<Message> (
msg, protocol::mtTRANSACTION)));
@@ -1364,7 +1382,8 @@ void LedgerConsensusImp::sendHaveTxSet (uint256 const& hash, bool direct)
msg, protocol::mtHAVE_SET)));
}
void LedgerConsensusImp::statusChange (protocol::NodeEvent event, Ledger& ledger)
void LedgerConsensusImp::statusChange (
protocol::NodeEvent event, Ledger& ledger)
{
protocol::TMStatusChange s;
@@ -1414,7 +1433,8 @@ void LedgerConsensusImp::takeInitialPosition (Ledger& initialLedger)
ValidationSet parentSet = getApp().getValidations().getValidations (
mPreviousLedger->info().parentHash);
m_feeVote.doVoting (mPreviousLedger, parentSet, preSet);
getApp().getAmendmentTable ().doVoting (mPreviousLedger, parentSet, preSet);
getApp().getAmendmentTable ().doVoting (
mPreviousLedger, parentSet, preSet);
initialSet = preSet->snapShot (false);
}
else
@@ -1511,7 +1531,8 @@ void LedgerConsensusImp::updateOurPositions ()
else
{
// proposal is still fresh
++closeTimes[roundCloseTime (it->second->getCloseTime (), mCloseResolution)];
++closeTimes[roundCloseTime (
it->second->getCloseTime (), mCloseResolution)];
++it;
}
}
@@ -1563,14 +1584,16 @@ void LedgerConsensusImp::updateOurPositions ()
{
// no other times
mHaveCloseTimeConsensus = true;
closeTime = roundCloseTime (mOurPosition->getCloseTime (), mCloseResolution);
closeTime = roundCloseTime (
mOurPosition->getCloseTime (), mCloseResolution);
}
else
{
int participants = mPeerPositions.size ();
if (mProposing)
{
++closeTimes[roundCloseTime (mOurPosition->getCloseTime (), mCloseResolution)];
++closeTimes[roundCloseTime (
mOurPosition->getCloseTime (), mCloseResolution)];
++participants;
}
@@ -1620,7 +1643,8 @@ void LedgerConsensusImp::updateOurPositions ()
}
if (!changes &&
((closeTime != roundCloseTime (mOurPosition->getCloseTime (), mCloseResolution))
((closeTime != roundCloseTime (
mOurPosition->getCloseTime (), mCloseResolution))
|| mOurPosition->isStale (ourCutoff)))
{
// close time changed or our position is stale
@@ -1701,7 +1725,8 @@ void LedgerConsensusImp::checkOurValidation ()
}
auto v = std::make_shared<STValidation> (mPreviousLedger->getHash (),
consensus_.validationTimestamp (getApp().timeKeeper().now().time_since_epoch().count()),
consensus_.validationTimestamp (
getApp().timeKeeper().now().time_since_epoch().count()),
mValPublic, false);
addLoad(v);
v->setTrusted ();
@@ -1728,14 +1753,16 @@ void LedgerConsensusImp::beginAccept (bool synchronous)
return;
}
consensus_.newLCL (mPeerPositions.size (), mCurrentMSeconds, mNewLedgerHash);
consensus_.newLCL (
mPeerPositions.size (), mCurrentMSeconds, mNewLedgerHash);
if (synchronous)
accept (consensusSet);
else
{
getApp().getJobQueue().addJob (jtACCEPT, "acceptLedger",
std::bind (&LedgerConsensusImp::accept, shared_from_this (), consensusSet));
std::bind (&LedgerConsensusImp::accept, shared_from_this (),
consensusSet));
}
}
@@ -1758,7 +1785,8 @@ void LedgerConsensusImp::addLoad(STValidation::ref val)
std::shared_ptr <LedgerConsensus>
make_LedgerConsensus (ConsensusImp& consensus, int previousProposers,
int previousConvergeTime, InboundTransactions& inboundTransactions,
LocalTxs& localtx, LedgerMaster& ledgerMaster, LedgerHash const &prevLCLHash,
LocalTxs& localtx, LedgerMaster& ledgerMaster,
LedgerHash const &prevLCLHash,
Ledger::ref previousLedger, std::uint32_t closeTime, FeeVote& feeVote)
{
return std::make_shared <LedgerConsensusImp> (consensus, previousProposers,
@@ -1791,7 +1819,8 @@ applyTransaction (OpenView& view,
WriteLog (lsDEBUG, LedgerConsensus) << "TXN "
<< txn->getTransactionID ()
//<< (engine.view().open() ? " open" : " closed") // because of the optional in engine
//<< (engine.view().open() ? " open" : " closed")
// because of the optional in engine
<< (retryAssured ? "/retry" : "/final");
WriteLog (lsTRACE, LedgerConsensus) << txn->getJson (0);

View File

@@ -344,8 +344,9 @@ private:
std::shared_ptr <LedgerConsensus>
make_LedgerConsensus (ConsensusImp& consensus, int previousProposers,
int previousConvergeTime, InboundTransactions& inboundTransactions,
LocalTxs& localtx, LedgerMaster& ledgerMaster, LedgerHash const &prevLCLHash,
Ledger::ref previousLedger, std::uint32_t closeTime, FeeVote& feeVote);
LocalTxs& localtx, LedgerMaster& ledgerMaster,
LedgerHash const &prevLCLHash, Ledger::ref previousLedger,
std::uint32_t closeTime, FeeVote& feeVote);
} // ripple

View File

@@ -52,9 +52,14 @@
namespace ripple {
#define MIN_VALIDATION_RATIO 150 // 150/256ths of validations of previous ledger
#define MAX_LEDGER_GAP 100 // Don't catch up more than 100 ledgers (cannot exceed 256)
#define MAX_LEDGER_AGE_ACQUIRE 60 // Don't acquire history if ledger is too old
// 150/256ths of validations of previous ledger
#define MIN_VALIDATION_RATIO 150
// Don't catch up more than 100 ledgers (cannot exceed 256)
#define MAX_LEDGER_GAP 100
// Don't acquire history if ledger is too old
#define MAX_LEDGER_AGE_ACQUIRE 60
class LedgerMasterImp
: public LedgerMaster
@@ -68,14 +73,25 @@ public:
LockType m_mutex;
LedgerHolder mCurrentLedger; // The ledger we are currently processiong
LedgerHolder mClosedLedger; // The ledger that most recently closed
LedgerHolder mValidLedger; // The highest-sequence ledger we have fully accepted
Ledger::pointer mPubLedger; // The last ledger we have published
Ledger::pointer mPathLedger; // The last ledger we did pathfinding against
Ledger::pointer mHistLedger; // The last ledger we handled fetching history
// The ledger we are currently processing.
LedgerHolder mCurrentLedger;
// Fully validated ledger, whether or not we have the ledger resident
// The ledger that most recently closed.
LedgerHolder mClosedLedger;
// The highest-sequence ledger we have fully accepted.
LedgerHolder mValidLedger;
// The last ledger we have published.
Ledger::pointer mPubLedger;
// The last ledger we did pathfinding against.
Ledger::pointer mPathLedger;
// The last ledger we handled fetching history
Ledger::pointer mHistLedger;
// Fully validated ledger, whether or not we have the ledger resident.
std::pair <uint256, LedgerIndex> mLastValidLedger;
LedgerHistory mLedgerHistory;
@@ -87,16 +103,19 @@ public:
std::unique_ptr <LedgerCleaner> mLedgerCleaner;
int mMinValidations; // The minimum validations to publish a ledger
uint256 mLastValidateHash;
std::uint32_t mLastValidateSeq;
int mMinValidations; // The minimum validations to publish a ledger.
uint256 mLastValidateHash;
std::uint32_t mLastValidateSeq;
bool mAdvanceThread; // Publish thread is running
bool mAdvanceWork; // Publish thread has work to do
// Publish thread is running.
bool mAdvanceThread;
// Publish thread has work to do.
bool mAdvanceWork;
int mFillInProgress;
int mPathFindThread; // Pathfinder jobs dispatched
bool mPathFindNewRequest;
int mPathFindThread; // Pathfinder jobs dispatched
bool mPathFindNewRequest;
std::atomic <std::uint32_t> mPubLedgerClose;
std::atomic <std::uint32_t> mPubLedgerSeq;
@@ -144,7 +163,8 @@ public:
, mValidLedgerSeq (0)
, mBuildingLedgerSeq (0)
, standalone_ (config.RUN_STANDALONE)
, fetch_depth_ (getApp ().getSHAMapStore ().clampFetchDepth (config.FETCH_DEPTH))
, fetch_depth_ (getApp ().getSHAMapStore ().clampFetchDepth (
config.FETCH_DEPTH))
, ledger_history_ (config.LEDGER_HISTORY)
, ledger_fetch_size_ (config.getSize (siLedgerFetch))
, fetch_packs_ ("FetchPack", 65536, 45, stopwatch,
@@ -202,7 +222,7 @@ public:
}
// VFALCO int widening?
std::int64_t ret = getApp().timeKeeper().closeTime().time_since_epoch().count();
auto ret = getApp().timeKeeper().closeTime().time_since_epoch().count();
ret -= static_cast<std::int64_t> (pubClose);
ret = (ret > 0) ? ret : 0;
@@ -219,7 +239,7 @@ public:
return 999999;
}
std::int64_t ret = getApp().timeKeeper().closeTime().time_since_epoch().count();
auto ret = getApp().timeKeeper().closeTime().time_since_epoch().count();
ret -= static_cast<std::int64_t> (valClose);
ret = (ret > 0) ? ret : 0;
@@ -293,9 +313,11 @@ public:
void pushLedger (Ledger::pointer newLedger)
{
// Caller should already have properly assembled this ledger into "ready-to-close" form --
// all candidate transactions must already be applied
WriteLog (lsINFO, LedgerMaster) << "PushLedger: " << newLedger->getHash ();
// Caller should already have properly assembled this ledger into
// "ready-to-close" form -- all candidate transactions must already be
// applied
WriteLog (lsINFO, LedgerMaster) << "PushLedger: "
<< newLedger->getHash();
{
ScopedLockType ml (m_mutex);
@@ -399,8 +421,10 @@ public:
for (auto const& it : mHeldTransactions)
{
ApplyFlags flags = tapNONE;
if (getApp().getHashRouter().addSuppressionFlags (it.first.getTXID (), SF_SIGGOOD))
if (getApp().getHashRouter().addSuppressionFlags (
it.first.getTXID (), SF_SIGGOOD))
flags = flags | tapNO_CHECK_SIGN;
auto const result = apply(view,
*it.second, flags, getApp().getHashRouter(
).sigVerify(), getConfig(), j);
@@ -417,8 +441,10 @@ public:
{
ApplyFlags tepFlags = tapNONE;
if (getApp().getHashRouter ().addSuppressionFlags (it.first.getTXID (), SF_SIGGOOD))
tepFlags = static_cast<ApplyFlags> (tepFlags | tapNO_CHECK_SIGN);
if (getApp().getHashRouter ().addSuppressionFlags (
it.first.getTXID (), SF_SIGGOOD))
tepFlags = static_cast<ApplyFlags> (
tepFlags | tapNO_CHECK_SIGN);
auto const ret = apply(view, *it.second,
tepFlags, getApp().getHashRouter().sigVerify(),
@@ -434,11 +460,13 @@ public:
view.apply(*newOL);
}
CondLog (recovers != 0, lsINFO, LedgerMaster) << "Recovered " << recovers << " held transactions";
CondLog (recovers != 0, lsINFO, LedgerMaster)
<< "Recovered " << recovers << " held transactions";
// VFALCO TODO recreate the CanonicalTxSet object instead of resetting it
// VFALCO NOTE The hash for an open ledger is undefined so
// we use something that is a reasonable substitute.
// VFALCO TODO recreate the CanonicalTxSet object instead of resetting
// it.
// VFALCO NOTE The hash for an open ledger is undefined so we use
// something that is a reasonable substitute.
mHeldTransactions.reset (newOL->info().hash);
mCurrentLedger.set (newOL);
}
@@ -513,8 +541,8 @@ public:
if (!pendingSaves.empty() && ((minVal != 0) || (maxVal != 0)))
{
// Ensure we shrink the tips as much as possible
// If we have 7-9 and 8,9 are invalid, we don't want to see the 8 and shrink to just 9
// Ensure we shrink the tips as much as possible. If we have 7-9 and
// 8,9 are invalid, we don't want to see the 8 and shrink to just 9
// because then we'll have nothing when we could have 7.
while (pendingSaves.count(maxVal) > 0)
--maxVal;
@@ -625,12 +653,12 @@ public:
return;
}
// Select target Peer based on highest score.
// The score is randomized but biased in favor of Peers with low latency.
// Select target Peer based on highest score. The score is randomized
// but biased in favor of Peers with low latency.
Peer::ptr target;
{
int maxScore = 0;
Overlay::PeerSequence peerList = getApp().overlay ().getActivePeers ();
auto peerList = getApp().overlay ().getActivePeers();
for (auto const& peer : peerList)
{
if (peer->hasRange (missingIndex, missingIndex + 1))
@@ -651,10 +679,12 @@ public:
tmBH.set_query (true);
tmBH.set_type (protocol::TMGetObjectByHash::otFETCH_PACK);
tmBH.set_ledgerhash (haveHash.begin(), 32);
Message::pointer packet = std::make_shared<Message> (tmBH, protocol::mtGET_OBJECTS);
auto packet = std::make_shared<Message> (
tmBH, protocol::mtGET_OBJECTS);
target->send (packet);
WriteLog (lsTRACE, LedgerMaster) << "Requested fetch pack for " << missingIndex;
WriteLog (lsTRACE, LedgerMaster) << "Requested fetch pack for "
<< missingIndex;
}
else
WriteLog (lsDEBUG, LedgerMaster) << "No peer for fetch pack";
@@ -706,10 +736,12 @@ public:
invalidate << " prior ledgers invalidated";
}
void setFullLedger (Ledger::pointer ledger, bool isSynchronous, bool isCurrent)
void setFullLedger (
Ledger::pointer ledger, bool isSynchronous, bool isCurrent)
{
// A new ledger has been accepted as part of the trusted chain
WriteLog (lsDEBUG, LedgerMaster) << "Ledger " << ledger->info().seq << " accepted :" << ledger->getHash ();
WriteLog (lsDEBUG, LedgerMaster) << "Ledger " << ledger->info().seq
<< "accepted :" << ledger->getHash ();
assert (ledger->stateMap().getHash ().isNonZero ());
ledger->setValidated();
@@ -719,10 +751,10 @@ public:
mLedgerHistory.addLedger(ledger, true);
{
// Check the SQL database's entry for the sequence before this ledger,
// if it's not this ledger's parent, invalidate it
// Check the SQL database's entry for the sequence before this
// ledger; if it's not this ledger's parent, invalidate it.
uint256 prevHash = Ledger::getHashByIndex (ledger->info().seq - 1);
if (prevHash.isNonZero () && (prevHash != ledger->info().parentHash))
if (prevHash.isNonZero () && prevHash != ledger->info().parentHash)
clearLedger (ledger->info().seq - 1);
}
@@ -746,15 +778,17 @@ public:
getApp().getOrderBookDB().setup(ledger);
}
if ((ledger->info().seq != 0) && haveLedger (ledger->info().seq - 1))
if (ledger->info().seq != 0 && haveLedger (ledger->info().seq - 1))
{
// we think we have the previous ledger, double check
Ledger::pointer prevLedger = getLedgerBySeq (ledger->info().seq - 1);
auto prevLedger = getLedgerBySeq (ledger->info().seq - 1);
if (!prevLedger || (prevLedger->getHash () != ledger->info().parentHash))
if (!prevLedger ||
(prevLedger->getHash () != ledger->info().parentHash))
{
WriteLog (lsWARNING, LedgerMaster) << "Acquired ledger invalidates previous ledger: " <<
(prevLedger ? "hashMismatch" : "missingLedger");
WriteLog (lsWARNING, LedgerMaster)
<< "Acquired ledger invalidates previous ledger: "
<< (prevLedger ? "hashMismatch" : "missingLedger");
fixMismatch (ledger);
}
}
@@ -764,10 +798,12 @@ public:
void failedSave(std::uint32_t seq, uint256 const& hash)
{
clearLedger(seq);
getApp().getInboundLedgers().acquire(hash, seq, InboundLedger::fcGENERIC);
getApp().getInboundLedgers().acquire(
hash, seq, InboundLedger::fcGENERIC);
}
// Check if the specified ledger can become the new last fully-validated ledger
// Check if the specified ledger can become the new last fully-validated
// ledger.
void checkAccept (uint256 const& hash, std::uint32_t seq)
{
@@ -817,8 +853,8 @@ public:
// FIXME: We may not want to fetch a ledger with just one
// trusted validation
ledger =
getApp().getInboundLedgers().acquire(hash, 0, InboundLedger::fcGENERIC);
ledger = getApp().getInboundLedgers().acquire(
hash, 0, InboundLedger::fcGENERIC);
}
if (ledger)
@@ -839,7 +875,8 @@ public:
if (mLastValidateHash.isNonZero ())
{
int val = getApp().getValidations ().getTrustedValidationCount (mLastValidateHash);
int val = getApp().getValidations ().getTrustedValidationCount (
mLastValidateHash);
val *= MIN_VALIDATION_RATIO;
val /= 256;
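The two lines above apply MIN_VALIDATION_RATIO with integer math; a small hypothetical helper tracing the same arithmetic (not part of this commit):

int neededValidations (int prevTrustedValidations)
{
    int val = prevTrustedValidations;
    val *= MIN_VALIDATION_RATIO;   // 150, i.e. 150/256ths
    val /= 256;                    // integer division
    return val;                    // e.g. 8 -> 4, 20 -> 11, 40 -> 23
}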
@@ -855,21 +892,26 @@ public:
if (ledger->info().seq <= mValidLedgerSeq)
return;
// Can we advance the last fully-validated ledger? If so, can we publish?
// Can we advance the last fully-validated ledger? If so, can we
// publish?
ScopedLockType ml (m_mutex);
if (ledger->info().seq <= mValidLedgerSeq)
return;
int minVal = getNeededValidations();
int tvc = getApp().getValidations().getTrustedValidationCount(ledger->getHash());
int tvc = getApp().getValidations().getTrustedValidationCount(
ledger->getHash());
if (tvc < minVal) // nothing we can do
{
WriteLog (lsTRACE, LedgerMaster) << "Only " << tvc << " validations for " << ledger->getHash();
WriteLog (lsTRACE, LedgerMaster)
<< "Only " << tvc << " validations for " << ledger->getHash();
return;
}
WriteLog (lsINFO, LedgerMaster) << "Advancing accepted ledger to " << ledger->info().seq << " with >= " << minVal << " validations";
WriteLog (lsINFO, LedgerMaster)
<< "Advancing accepted ledger to " << ledger->info().seq
<< " with >= " << minVal << " validations";
mLastValidateHash = ledger->getHash();
mLastValidateSeq = ledger->info().seq;
@@ -887,7 +929,8 @@ public:
std::uint64_t const base = getApp().getFeeTrack().getLoadBase();
auto fees = getApp().getValidations().fees (ledger->getHash(), base);
{
auto fees2 = getApp().getValidations().fees (ledger->info().parentHash, base);
auto fees2 = getApp().getValidations().fees (
ledger->info().parentHash, base);
fees.reserve (fees.size() + fees2.size());
std::copy (fees2.begin(), fees2.end(), std::back_inserter(fees));
}
@@ -939,7 +982,8 @@ public:
// This ledger cannot be the new fully-validated ledger, but
// maybe we saved up validations for some other ledger that can be
auto const val = getApp().getValidations().getCurrentTrustedValidations();
auto const val =
getApp().getValidations().getCurrentTrustedValidations();
// Track validation counts with sequence numbers
class valSeq
@@ -1065,13 +1109,15 @@ public:
}
else if (! mPubLedger)
{
WriteLog (lsINFO, LedgerMaster) << "First published ledger will be " << mValidLedgerSeq;
WriteLog (lsINFO, LedgerMaster) << "First published ledger will be "
<< mValidLedgerSeq;
ret.push_back (mValidLedger.get ());
}
else if (mValidLedgerSeq > (mPubLedgerSeq + MAX_LEDGER_GAP))
{
WriteLog (lsWARNING, LedgerMaster) << "Gap in validated ledger stream " << mPubLedgerSeq << " - " <<
mValidLedgerSeq - 1;
WriteLog (lsWARNING, LedgerMaster)
<< "Gap in validated ledger stream " << mPubLedgerSeq
<< " - " << mValidLedgerSeq - 1;
Ledger::pointer valLedger = mValidLedger.get ();
ret.push_back (valLedger);
setPubLedger (valLedger);
@@ -1081,7 +1127,7 @@ public:
{
int acqCount = 0;
std::uint32_t pubSeq = mPubLedgerSeq + 1; // Next sequence to publish
auto pubSeq = mPubLedgerSeq + 1; // Next sequence to publish
Ledger::pointer valLedger = mValidLedger.get ();
std::uint32_t valSeq = valLedger->info().seq;
@@ -1090,12 +1136,14 @@ public:
{
for (std::uint32_t seq = pubSeq; seq <= valSeq; ++seq)
{
WriteLog (lsTRACE, LedgerMaster) << "Trying to fetch/publish valid ledger " << seq;
WriteLog (lsTRACE, LedgerMaster)
<< "Trying to fetch/publish valid ledger " << seq;
Ledger::pointer ledger;
// This can throw
auto hash = hashOfSeq(*valLedger, seq, m_journal);
// VFALCO TODO Restructure this code so that zero is not used
// VFALCO TODO Restructure this code so that zero is not
// used.
if (! hash)
hash = zero; // kludge
if (seq == valSeq)
@@ -1105,7 +1153,9 @@ public:
}
else if (hash->isZero())
{
WriteLog (lsFATAL, LedgerMaster) << "Ledger: " << valSeq << " does not have hash for " << seq;
WriteLog (lsFATAL, LedgerMaster)
<< "Ledger: " << valSeq
<< " does not have hash for " << seq;
assert (false);
}
else
@@ -1130,11 +1180,13 @@ public:
}
catch (...)
{
WriteLog (lsERROR, LedgerMaster) << "findNewLedgersToPublish catches an exception";
WriteLog (lsERROR, LedgerMaster)
<< "findNewLedgersToPublish catches an exception";
}
}
WriteLog (lsTRACE, LedgerMaster) << "findNewLedgersToPublish> " << ret.size();
WriteLog (lsTRACE, LedgerMaster)
<< "findNewLedgersToPublish> " << ret.size();
return ret;
}
@@ -1147,12 +1199,14 @@ public:
if (!mAdvanceThread && !mValidLedger.empty ())
{
mAdvanceThread = true;
getApp().getJobQueue ().addJob (jtADVANCE, "advanceLedger",
std::bind (&LedgerMasterImp::advanceThread, this));
getApp().getJobQueue ().addJob (
jtADVANCE, "advanceLedger",
std::bind (&LedgerMasterImp::advanceThread, this));
}
}
// Return the hash of the valid ledger with a particular sequence, given a subsequent ledger known valid
// Return the hash of the valid ledger with a particular sequence, given a
// subsequent ledger known valid.
// VFALCO NOTE This should return boost::optional<uint256>
uint256 getLedgerHash(std::uint32_t desiredSeq, Ledger::ref knownGoodLedger)
{
@@ -1191,7 +1245,8 @@ public:
{
{
ScopedLockType ml (m_mutex);
if (getApp().getOPs().isNeedNetworkLedger () || mCurrentLedger.empty ())
if (getApp().getOPs().isNeedNetworkLedger() ||
mCurrentLedger.empty())
{
--mPathFindThread;
return;
@@ -1206,7 +1261,8 @@ public:
ScopedLockType ml (m_mutex);
if (!mValidLedger.empty() &&
(!mPathLedger || (mPathLedger->info().seq != mValidLedgerSeq)))
(!mPathLedger ||
(mPathLedger->info().seq != mValidLedgerSeq)))
{ // We have a new valid ledger since the last full pathfinding
mPathLedger = mValidLedger.get ();
lastLedger = mPathLedger;
@@ -1224,11 +1280,13 @@ public:
if (!standalone_)
{ // don't pathfind with a ledger that's more than 60 seconds old
std::int64_t age = getApp().timeKeeper().closeTime().time_since_epoch().count();
auto age = getApp().timeKeeper().closeTime().time_since_epoch()
.count();
age -= static_cast<std::int64_t> (lastLedger->info().closeTime);
if (age > 60)
{
WriteLog (lsDEBUG, LedgerMaster) << "Published ledger too old for updating paths";
WriteLog (lsDEBUG, LedgerMaster)
<< "Published ledger too old for updating paths";
--mPathFindThread;
return;
}
@@ -1236,12 +1294,15 @@ public:
try
{
getApp().getPathRequests().updateAll (lastLedger, job.getCancelCallback ());
getApp().getPathRequests().updateAll(
lastLedger, job.getCancelCallback());
}
catch (SHAMapMissingNode&)
{
WriteLog (lsINFO, LedgerMaster) << "Missing node detected during pathfinding";
getApp().getInboundLedgers().acquire(lastLedger->getHash (), lastLedger->info().seq,
WriteLog (lsINFO, LedgerMaster)
<< "Missing node detected during pathfinding";
getApp().getInboundLedgers().acquire(
lastLedger->getHash (), lastLedger->info().seq,
InboundLedger::fcGENERIC);
}
}
@@ -1264,7 +1325,8 @@ public:
return true;
}
// If the order book is radically updated, we need to reprocess all pathfinding requests
// If the order book is radically updated, we need to reprocess all
// pathfinding requests.
void newOrderBookDB ()
{
ScopedLockType ml (m_mutex);
@@ -1273,7 +1335,7 @@ public:
newPFWork("pf:newOBDB");
}
/** A thread needs to be dispatched to handle pathfinding work of some kind
/** A thread needs to be dispatched to handle pathfinding work of some kind.
*/
void newPFWork (const char *name)
{
@@ -1314,7 +1376,8 @@ public:
return mValidLedger.get ();
}
// This is the last ledger we published to clients and can lag the validated ledger
// This is the last ledger we published to clients and can lag the validated
// ledger.
Ledger::ref getPublishedLedger ()
{
return mPubLedger;
@@ -1617,13 +1680,15 @@ void LedgerMasterImp::doAdvance ()
missing = mCompleteLedgers.prevMissing(
mPubLedger->info().seq);
}
WriteLog (lsTRACE, LedgerMaster) << "tryAdvance discovered missing " << missing;
WriteLog (lsTRACE, LedgerMaster)
<< "tryAdvance discovered missing " << missing;
if ((missing != RangeSet::absent) && (missing > 0) &&
shouldAcquire (mValidLedgerSeq, ledger_history_,
getApp ().getSHAMapStore ().getCanDelete (), missing) &&
((mFillInProgress == 0) || (missing > mFillInProgress)))
{
WriteLog (lsTRACE, LedgerMaster) << "advanceThread should acquire";
WriteLog (lsTRACE, LedgerMaster)
<< "advanceThread should acquire";
{
ScopedUnlockType sl(m_mutex);
uint256 hash = getLedgerHashForHistory (missing);
@@ -1632,23 +1697,30 @@ void LedgerMasterImp::doAdvance ()
Ledger::pointer ledger = getLedgerByHash (hash);
if (!ledger)
{
if (!getApp().getInboundLedgers().isFailure (hash))
if (!getApp().getInboundLedgers().isFailure (
hash))
{
ledger =
getApp().getInboundLedgers().acquire(hash,
missing,
InboundLedger::fcHISTORY);
if (! ledger && (missing > 32600) && shouldFetchPack (missing))
getApp().getInboundLedgers().acquire(
hash, missing,
InboundLedger::fcHISTORY);
if (! ledger && (missing > 32600) &&
shouldFetchPack (missing))
{
WriteLog (lsTRACE, LedgerMaster) << "tryAdvance want fetch pack " << missing;
WriteLog (lsTRACE, LedgerMaster) <<
"tryAdvance want fetch pack " <<
missing;
fetch_seq_ = missing;
getFetchPack(hash, missing);
}
else
WriteLog (lsTRACE, LedgerMaster) << "tryAdvance no fetch pack for " << missing;
WriteLog (lsTRACE, LedgerMaster) <<
"tryAdvance no fetch pack for " <<
missing;
}
else
WriteLog (lsDEBUG, LedgerMaster) << "tryAdvance found failed acquire";
WriteLog (lsDEBUG, LedgerMaster) <<
"tryAdvance found failed acquire";
}
if (ledger)
{
@@ -1666,9 +1738,10 @@ void LedgerMasterImp::doAdvance ()
// Previous ledger is in DB
ScopedLockType lock (m_mutex);
mFillInProgress = ledger->info().seq;
getApp().getJobQueue().addJob(jtADVANCE, "tryFill", std::bind (
&LedgerMasterImp::tryFill, this,
std::placeholders::_1, ledger));
getApp().getJobQueue().addJob(
jtADVANCE, "tryFill", std::bind (
&LedgerMasterImp::tryFill, this,
std::placeholders::_1, ledger));
}
progress = true;
}
@@ -1679,30 +1752,38 @@ void LedgerMasterImp::doAdvance ()
for (int i = 0; i < ledger_fetch_size_; ++i)
{
std::uint32_t seq = missing - i;
uint256 hash = getLedgerHashForHistory (seq);
auto hash = getLedgerHashForHistory(seq);
if (hash.isNonZero())
getApp().getInboundLedgers().acquire(hash,
seq, InboundLedger::fcHISTORY);
getApp().getInboundLedgers().acquire(
hash, seq, InboundLedger::fcHISTORY);
}
}
catch (...)
{
WriteLog (lsWARNING, LedgerMaster) << "Threw while prefetching";
WriteLog (lsWARNING, LedgerMaster) <<
"Threw while prefetching";
}
}
}
else
{
WriteLog (lsFATAL, LedgerMaster) << "Unable to find ledger following prevMissing " << missing;
WriteLog (lsFATAL, LedgerMaster) << "Pub:" << mPubLedgerSeq << " Val:" << mValidLedgerSeq;
WriteLog (lsFATAL, LedgerMaster) << "Ledgers: " << getApp().getLedgerMaster().getCompleteLedgers();
WriteLog (lsFATAL, LedgerMaster) <<
"Can't find ledger following prevMissing " <<
missing;
WriteLog (lsFATAL, LedgerMaster) << "Pub:" <<
mPubLedgerSeq << " Val:" << mValidLedgerSeq;
WriteLog (lsFATAL, LedgerMaster) << "Ledgers: " <<
getApp().getLedgerMaster().getCompleteLedgers();
clearLedger (missing + 1);
progress = true;
}
}
if (mValidLedgerSeq != mPubLedgerSeq)
{
WriteLog (lsDEBUG, LedgerMaster) << "tryAdvance found last valid changed";
WriteLog (lsDEBUG, LedgerMaster) <<
"tryAdvance found last valid changed";
progress = true;
}
}
@@ -1710,7 +1791,8 @@ void LedgerMasterImp::doAdvance ()
else
{
mHistLedger.reset();
WriteLog (lsTRACE, LedgerMaster) << "tryAdvance not fetching history";
WriteLog (lsTRACE, LedgerMaster) <<
"tryAdvance not fetching history";
}
}
else

View File

@@ -43,7 +43,7 @@ int getNextLedgerTimeResolution (
// If we did not previously agree, we try to decrease the resolution to
// improve the chance that we will agree now.
if (!previousAgree && ((ledgerSeq % decreaseLedgerTimeResolutionEvery) == 0))
if (!previousAgree && ledgerSeq % decreaseLedgerTimeResolutionEvery == 0)
{
if (++iter != std::end (ledgerPossibleTimeResolutions))
return *iter;
@@ -51,7 +51,7 @@ int getNextLedgerTimeResolution (
// If we previously agreed, we try to increase the resolution to determine
// if we can continue to agree.
if (previousAgree && ((ledgerSeq % increaseLedgerTimeResolutionEvery) == 0))
if (previousAgree && ledgerSeq % increaseLedgerTimeResolutionEvery == 0)
{
if (iter-- != std::begin (ledgerPossibleTimeResolutions))
return *iter;
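A short trace of the two branches above, under assumptions that are not visible in this diff (the cadence constants and a resolution table ordered from finest to coarsest): if decreaseLedgerTimeResolutionEvery were 8 and increaseLedgerTimeResolutionEvery were 4, then ledger 256 after a failed agreement satisfies 256 % 8 == 0 and steps ++iter to the next, coarser entry, making agreement on a rounded close time more likely, while ledger 256 after agreement satisfies 256 % 4 == 0 and steps iter-- back to a finer entry to test whether the network can still agree at that precision.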

View File

@@ -35,7 +35,7 @@ namespace HTTP {
read from the stream until there is enough to determine a result.
No bytes are discarded from buf. Any additional bytes read are retained.
buf must provide an interface compatible with boost::asio::streambuf
http://www.boost.org/doc/libs/1_56_0/doc/html/boost_asio/reference/streambuf.html
http://boost.org/doc/libs/1_56_0/doc/html/boost_asio/reference/streambuf.html
See
http://www.ietf.org/rfc/rfc2246.txt
Section 7.4. Handshake protocol
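The contract described above (keep reading until a decision is possible, without discarding any buffered bytes) can be sketched as a small generic helper; the predicate is hypothetical and this is not code from this commit.

#include <boost/asio.hpp>

template <class SyncReadStream, class Decider>
boost::system::error_code
readUntilDecided (SyncReadStream& stream,
    boost::asio::streambuf& buf, Decider done)
{
    boost::system::error_code ec;
    while (! done (buf) && ! ec)
        boost::asio::read (stream, buf,
            boost::asio::transfer_at_least (1), ec);  // all bytes stay in buf
    return ec;
}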

View File

@@ -300,7 +300,8 @@ ServerHandlerImp::processRequest (
Resource::Consumer usage;
if (role == Role::ADMIN)
usage = m_resourceManager.newAdminEndpoint (remoteIPAddress.to_string());
usage = m_resourceManager.newAdminEndpoint (
remoteIPAddress.to_string());
else
usage = m_resourceManager.newInboundEndpoint(remoteIPAddress);
@@ -542,8 +543,8 @@ parse_Port (ParsedPort& port, Section const& section, std::ostream& log)
auto const ul = std::stoul(result.first);
if (ul > std::numeric_limits<std::uint16_t>::max())
{
log <<
"Value '" << result.first << "' for key 'port' is out of range\n";
log << "Value '" << result.first
<< "' for key 'port' is out of range\n";
throw std::exception();
}
if (ul == 0)
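The rewrapped log message sits inside a range check on the parsed port string: std::stoul converts it, and values of zero or above the std::uint16_t maximum are rejected. The same validation as a self-contained sketch, with an illustrative name and exception type rather than parse_Port's logging:

#include <cstdint>
#include <limits>
#include <stdexcept>
#include <string>

// Parse a decimal port string; throws std::runtime_error if the value is
// not a number, zero, or does not fit in 16 bits.  Illustrative only.
std::uint16_t parsePort (std::string const& text)
{
    unsigned long ul = 0;
    try
    {
        ul = std::stoul (text);
    }
    catch (std::exception const&)
    {
        throw std::runtime_error ("'" + text + "' is not a valid number");
    }
    if (ul == 0 || ul > std::numeric_limits<std::uint16_t>::max ())
        throw std::runtime_error ("'" + text + "' is out of range");
    return static_cast<std::uint16_t> (ul);
}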


@@ -32,8 +32,8 @@ namespace ripple {
class NetworkOPs;
std::unique_ptr <ServerHandler>
make_ServerHandler (beast::Stoppable& parent, boost::asio::io_service& io_service,
JobQueue& jobQueue, NetworkOPs& networkOPs, Resource::Manager& resourceManager,
make_ServerHandler (beast::Stoppable& parent, boost::asio::io_service&,
JobQueue&, NetworkOPs&, Resource::Manager&,
CollectorManager& cm);
} // ripple


@@ -36,7 +36,7 @@
class AutoSocket
{
public:
using ssl_socket = boost::asio::ssl::stream<boost::asio::ip::tcp::socket>;
using ssl_socket = boost::asio::ssl::stream<boost::asio::ip::tcp::socket>;
using endpoint_type = boost::asio::ip::tcp::socket::endpoint_type;
using socket_ptr = std::shared_ptr<ssl_socket>;
using plain_socket = ssl_socket::next_layer_type;
@@ -53,7 +53,9 @@ public:
mSocket = std::make_shared<ssl_socket> (s, c);
}
AutoSocket (boost::asio::io_service& s, boost::asio::ssl::context& c, bool secureOnly, bool plainOnly)
AutoSocket (
boost::asio::io_service& s, boost::asio::ssl::context& c,
bool secureOnly, bool plainOnly)
: mSecure (secureOnly)
, mBuffer ((plainOnly || secureOnly) ? 0 : 4)
{
@@ -118,7 +120,8 @@ public:
}
static bool rfc2818_verify (std::string const& domain, bool preverified, boost::asio::ssl::verify_context& ctx)
static bool rfc2818_verify (std::string const& domain, bool preverified,
boost::asio::ssl::verify_context& ctx)
{
using namespace ripple;
@@ -138,15 +141,19 @@ public:
mSocket->set_verify_mode (boost::asio::ssl::verify_peer);
// XXX Verify semantics of RFC 2818 are what we want.
mSocket->set_verify_callback (std::bind (&rfc2818_verify, strDomain, std::placeholders::_1, std::placeholders::_2), ec);
mSocket->set_verify_callback (
std::bind (&rfc2818_verify, strDomain,
std::placeholders::_1, std::placeholders::_2), ec);
return ec;
}
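The rewrapped call binds a custom callback for the RFC 2818 hostname check. When nothing beyond standard HTTPS hostname matching is needed, Boost.Asio also ships a ready-made verifier; a sketch of that alternative setup (an illustrative helper, not the code above):

#include <boost/asio/ssl.hpp>
#include <string>

// Verify the peer certificate against a hostname using Boost.Asio's
// built-in RFC 2818 (HTTPS) rules.  Illustrative helper.
template <class SslStream>
void setRfc2818Verification (SslStream& stream, std::string const& domain)
{
    stream.set_verify_mode (boost::asio::ssl::verify_peer);
    stream.set_verify_callback (
        boost::asio::ssl::rfc2818_verification (domain));
}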
/*
template <typename HandshakeHandler>
BOOST_ASIO_INITFN_RESULT_TYPE(HandshakeHandler, void (boost::system::error_code))
async_handshake (handshake_type role, BOOST_ASIO_MOVE_ARG(HandshakeHandler) handler)
BOOST_ASIO_INITFN_RESULT_TYPE(HandshakeHandler,
void (boost::system::error_code))
async_handshake (handshake_type role,
BOOST_ASIO_MOVE_ARG(HandshakeHandler) handler)
{
return async_handshake_cb (role, handler);
}
@@ -170,9 +177,14 @@ public:
else
{
// autodetect
mSocket->next_layer ().async_receive (boost::asio::buffer (mBuffer), boost::asio::socket_base::message_peek,
std::bind (&AutoSocket::handle_autodetect, this, cbFunc,
beast::asio::placeholders::error, beast::asio::placeholders::bytes_transferred));
mSocket->next_layer ().async_receive (
boost::asio::buffer (mBuffer),
boost::asio::socket_base::message_peek,
std::bind (
&AutoSocket::handle_autodetect,
this, cbFunc,
beast::asio::placeholders::error,
beast::asio::placeholders::bytes_transferred));
}
}
@@ -207,61 +219,70 @@ public:
}
template <typename Seq, typename Condition, typename Handler>
void async_read_until (const Seq& buffers, Condition condition, Handler handler)
void async_read_until(
const Seq& buffers, Condition condition, Handler handler)
{
if (isSecure ())
boost::asio::async_read_until (*mSocket, buffers, condition, handler);
if (isSecure())
boost::asio::async_read_until(
*mSocket, buffers, condition, handler);
else
boost::asio::async_read_until (PlainSocket (), buffers, condition, handler);
boost::asio::async_read_until(
PlainSocket (), buffers, condition, handler);
}
template <typename Allocator, typename Handler>
void async_read_until (boost::asio::basic_streambuf<Allocator>& buffers, std::string const& delim, Handler handler)
void async_read_until(boost::asio::basic_streambuf<Allocator>& buffers,
std::string const& delim, Handler handler)
{
if (isSecure ())
boost::asio::async_read_until (*mSocket, buffers, delim, handler);
boost::asio::async_read_until(*mSocket, buffers, delim, handler);
else
boost::asio::async_read_until (PlainSocket (), buffers, delim, handler);
boost::asio::async_read_until(
PlainSocket(), buffers, delim, handler);
}
template <typename Allocator, typename MatchCondition, typename Handler>
void async_read_until (boost::asio::basic_streambuf<Allocator>& buffers, MatchCondition cond, Handler handler)
void async_read_until (boost::asio::basic_streambuf<Allocator>& buffers,
MatchCondition cond, Handler handler)
{
if (isSecure ())
boost::asio::async_read_until (*mSocket, buffers, cond, handler);
boost::asio::async_read_until(*mSocket, buffers, cond, handler);
else
boost::asio::async_read_until (PlainSocket (), buffers, cond, handler);
boost::asio::async_read_until(
PlainSocket(), buffers, cond, handler);
}
template <typename Buf, typename Handler>
void async_write (const Buf& buffers, Handler handler)
{
if (isSecure ())
boost::asio::async_write (*mSocket, buffers, handler);
boost::asio::async_write(*mSocket, buffers, handler);
else
boost::asio::async_write (PlainSocket (), buffers, handler);
boost::asio::async_write(PlainSocket (), buffers, handler);
}
template <typename Allocator, typename Handler>
void async_write (boost::asio::basic_streambuf<Allocator>& buffers, Handler handler)
void async_write (boost::asio::basic_streambuf<Allocator>& buffers,
Handler handler)
{
if (isSecure ())
boost::asio::async_write (*mSocket, buffers, handler);
boost::asio::async_write(*mSocket, buffers, handler);
else
boost::asio::async_write (PlainSocket (), buffers, handler);
boost::asio::async_write(PlainSocket(), buffers, handler);
}
template <typename Buf, typename Condition, typename Handler>
void async_read (const Buf& buffers, Condition cond, Handler handler)
{
if (isSecure ())
boost::asio::async_read (*mSocket, buffers, cond, handler);
boost::asio::async_read(*mSocket, buffers, cond, handler);
else
boost::asio::async_read (PlainSocket (), buffers, cond, handler);
boost::asio::async_read(PlainSocket(), buffers, cond, handler);
}
template <typename Allocator, typename Condition, typename Handler>
void async_read (boost::asio::basic_streambuf<Allocator>& buffers, Condition cond, Handler handler)
void async_read (boost::asio::basic_streambuf<Allocator>& buffers,
Condition cond, Handler handler)
{
if (isSecure ())
boost::asio::async_read (*mSocket, buffers, cond, handler);
@@ -288,7 +309,8 @@ public:
}
protected:
void handle_autodetect (callback cbFunc, const error_code& ec, size_t bytesTransferred)
void handle_autodetect (
callback cbFunc, const error_code& ec, size_t bytesTransferred)
{
using namespace ripple;
@@ -299,9 +321,12 @@ protected:
cbFunc (ec);
}
else if ((mBuffer[0] < 127) && (mBuffer[0] > 31) &&
((bytesTransferred < 2) || ((mBuffer[1] < 127) && (mBuffer[1] > 31))) &&
((bytesTransferred < 3) || ((mBuffer[2] < 127) && (mBuffer[2] > 31))) &&
((bytesTransferred < 4) || ((mBuffer[3] < 127) && (mBuffer[3] > 31))))
((bytesTransferred < 2)
|| ((mBuffer[1] < 127) && (mBuffer[1] > 31))) &&
((bytesTransferred < 3)
|| ((mBuffer[2] < 127) && (mBuffer[2] > 31))) &&
((bytesTransferred < 4)
|| ((mBuffer[3] < 127) && (mBuffer[3] > 31))))
{
// not ssl
WriteLog (lsTRACE, AutoSocket) << "non-SSL";
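The reformatted condition declares the connection "not SSL" when every byte peeked so far is printable ASCII (32 to 126), which is how a plaintext HTTP request starts, while TLS and SSLv2 handshake bytes fall outside that range. The same test as a small helper over the peeked buffer (name and signature are illustrative, not part of AutoSocket):

#include <cstddef>
#include <vector>

// True if the first `got` peeked bytes are all printable ASCII, i.e. the
// connection looks like plaintext rather than an SSL/TLS handshake.
// Illustrative helper; requires at least one byte to decide.
inline bool looksLikePlainText (
    std::vector<char> const& peeked, std::size_t got)
{
    for (std::size_t i = 0; i < got && i < peeked.size (); ++i)
    {
        auto const b = static_cast<unsigned char> (peeked[i]);
        if (b <= 31 || b >= 127)
            return false;       // control or high-bit byte: not plaintext
    }
    return got != 0;
}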


@@ -59,11 +59,12 @@ struct Config04 : ConfigBase04 {
using elog_type = type::elog_type;
using request_type = type::request_type;
using response_type = type::response_type;
using socket_type = websocketpp::transport::asio::basic_socket::endpoint;
using socket_type =
websocketpp::transport::asio::basic_socket::endpoint;
};
using transport_type = websocketpp::transport::asio::endpoint<transport_config>
;
using transport_type =
websocketpp::transport::asio::endpoint<transport_config>;
};
} // websocket


@@ -259,7 +259,7 @@ Json::Value ConnectionImpl <WebSocket>::invokeCommand (Json::Value& jvRequest)
Json::Value jvResult (Json::objectValue);
auto required = RPC::roleRequired (jvRequest[jss::command].asString());
Role const role = requestRole (required, m_port, jvRequest, m_remoteAddress);
auto role = requestRole (required, m_port, jvRequest, m_remoteAddress);
if (Role::FORBID == role)
{
@@ -339,9 +339,10 @@ void ConnectionImpl <WebSocket>::disconnect ()
connection_ptr ptr = m_connection.lock ();
if (ptr)
this->m_io_service.dispatch (WebSocket::getStrand (*ptr).wrap (std::bind (
&ConnectionImpl <WebSocket>::handle_disconnect,
m_connection)));
this->m_io_service.dispatch (
WebSocket::getStrand (*ptr).wrap (
std::bind(&ConnectionImpl <WebSocket>::handle_disconnect,
m_connection)));
}
// static