mirror of
https://github.com/Xahau/xahaud.git
synced 2025-11-30 23:45:48 +00:00
Refactor PeerFinder:
* Revise documentation in README.md * Inject abstract_clock in Manager * Introduce the Slot object as a replacement for Peer * New bullet-proof method for slot accounting * Replace Peer with Slot for tracking connections * Prevent duplicate outbound connection attempts * Improved connection and bootstrap business logic * Refactor PeerImp, PeersImp private interfaces * Give PeersImp access to the PeerImp interface * Handle errors retrieving endpoints from asio sockets * Use weak_ptr to manage PeerImp lifetime * Better handling of socket closure in PeerImp * Improve the orderly shutdown logic of PeersImp
This commit is contained in:
@@ -133,7 +133,7 @@
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\src\ripple\peerfinder\impl\Resolver.cpp">
|
||||
<ClCompile Include="..\..\src\ripple\peerfinder\impl\SlotImp.h">
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
|
||||
@@ -1144,7 +1144,7 @@
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\src\ripple_app\peers\Peer.cpp">
|
||||
<ClCompile Include="..\..\src\ripple_app\peers\PeerImp.h">
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
|
||||
@@ -2235,22 +2235,20 @@
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\api\Config.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\api\Endpoint.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\api\Manager.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\api\Slot.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\api\Types.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Bootcache.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Checker.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\CheckerAdapter.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\FixedPeer.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Fixed.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Giveaways.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\iosformat.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Livecache.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Logic.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\LogicType.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Peer.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\PrivateTypes.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Reporting.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Resolver.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Seen.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Slots.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Counts.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Sorts.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Source.h" />
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\SourceStrings.h" />
|
||||
|
||||
@@ -702,9 +702,6 @@
|
||||
<ClCompile Include="..\..\src\ripple_app\paths\RippleState.cpp">
|
||||
<Filter>[2] Old Ripple\ripple_app\paths</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\src\ripple_app\peers\Peer.cpp">
|
||||
<Filter>[2] Old Ripple\ripple_app\peers</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\src\ripple_app\peers\Peers.cpp">
|
||||
<Filter>[2] Old Ripple\ripple_app\peers</Filter>
|
||||
</ClCompile>
|
||||
@@ -1098,15 +1095,9 @@
|
||||
<ClCompile Include="..\..\src\ripple\json\impl\Tests.cpp">
|
||||
<Filter>[1] Ripple\json\impl</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\src\ripple\types\impl\JsonPropertyStream.cpp">
|
||||
<Filter>[1] Ripple\types\impl</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\src\ripple\peerfinder\impl\Checker.cpp">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\src\ripple\peerfinder\impl\Cache.cpp">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\src\ripple\resource\ripple_resource.cpp">
|
||||
<Filter>[1] Ripple\resource</Filter>
|
||||
</ClCompile>
|
||||
@@ -1446,9 +1437,6 @@
|
||||
<ClCompile Include="..\..\src\ripple\peerfinder\impl\Manager.cpp">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\src\ripple\peerfinder\impl\Resolver.cpp">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\src\ripple\peerfinder\impl\SourceStrings.cpp">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClCompile>
|
||||
@@ -1470,6 +1458,12 @@
|
||||
<ClCompile Include="..\..\src\ripple\json\impl\JsonPropertyStream.cpp">
|
||||
<Filter>[1] Ripple\json\impl</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\src\ripple\peerfinder\impl\SlotImp.h">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\src\ripple_app\peers\PeerImp.h">
|
||||
<Filter>[2] Old Ripple\ripple_app\peers</Filter>
|
||||
</ClCompile>
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<ClInclude Include="..\..\src\ripple_basics\containers\RangeSet.h">
|
||||
@@ -2475,30 +2469,15 @@
|
||||
<ClInclude Include="..\..\src\ripple\validators\impl\Count.h">
|
||||
<Filter>[1] Ripple\validators\impl</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\src\ripple\types\api\JsonPropertyStream.h">
|
||||
<Filter>[1] Ripple\types\api</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Checker.h">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Tuning.h">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\LegacyEndpoint.h">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\LegacyEndpointCache.h">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\CheckerAdapter.h">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Cache.h">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\CachedEndpoint.h">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\src\ripple\resource\ripple_resource.h">
|
||||
<Filter>[1] Ripple\resource</Filter>
|
||||
</ClInclude>
|
||||
@@ -2937,9 +2916,6 @@
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\CheckerAdapter.h">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\FixedPeer.h">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Giveaways.h">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClInclude>
|
||||
@@ -2952,27 +2928,15 @@
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Logic.h">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\LogicType.h">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Peer.h">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\PrivateTypes.h">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Reporting.h">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Resolver.h">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Seen.h">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Slots.h">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Sorts.h">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClInclude>
|
||||
@@ -3048,6 +3012,15 @@
|
||||
<ClInclude Include="..\..\src\ripple\json\api\JsonPropertyStream.h">
|
||||
<Filter>[1] Ripple\json\api</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\api\Slot.h">
|
||||
<Filter>[1] Ripple\peerfinder\api</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Counts.h">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\src\ripple\peerfinder\impl\Fixed.h">
|
||||
<Filter>[1] Ripple\peerfinder\impl</Filter>
|
||||
</ClInclude>
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<CustomBuild Include="..\..\src\ripple_data\protocol\ripple.proto">
|
||||
|
||||
@@ -184,12 +184,4 @@
|
||||
#define RIPPLE_USE_VALIDATORS 0
|
||||
#endif
|
||||
|
||||
// Turning this on will use the new PeerFinder logic to establish connections
|
||||
// to other peers. Even with this off, PeerFinder will still send mtENDPOINTS
|
||||
// messages as needed, and collect legacy IP endpoint information.
|
||||
//
|
||||
#ifndef RIPPLE_USE_PEERFINDER
|
||||
#define RIPPLE_USE_PEERFINDER 0
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
@@ -1,234 +1,366 @@
|
||||
|
||||
# PeerFinder
|
||||
|
||||
The PeerFinder module has these responsibilities:
|
||||
## Introduction
|
||||
|
||||
- Maintain a set of addresses suitable for bootstrapping into the overlay.
|
||||
- Send and receive protocol messages for peer address discovery.
|
||||
- Provide network addresses to other peers that need them.
|
||||
- Maintain connections to the configured set of fixed peers.
|
||||
- Track and manage peer connection slots.
|
||||
Each _peer_ (a running instance of the **rippled** program) on the Ripple network
|
||||
maintains multiple TCP/IP connections to other peers (neighbors) who themselves
|
||||
have neighboring peers. The resulting network is called a _peer to peer overlay
|
||||
network_, or just [_overlay network_][overlay_network]. Messages passed along these
|
||||
connections travel between peers and implement the communication layer of the
|
||||
_Ripple peer protocol_.
|
||||
|
||||
## Description
|
||||
When a peer comes online it needs a set of IP addresses to connect to in order to
|
||||
gain initial entry into the overlay in a process called _bootstrapping_. Once they
|
||||
have established an initial set of these outbound peer connections, they need to
|
||||
gain additional addresses to establish more outbound peer connections until the
|
||||
desired limit is reached. Furthermore, they need a mechanism to advertise their
|
||||
IP address to new or existing peers in the overlay so they may receive inbound
|
||||
connections up to some desired limit. And finally, they need a mechanism to provide
|
||||
inbound connection requests with an alternate set of IP addresses to try when they
|
||||
have already reached their desired maximum number of inbound connections.
|
||||
|
||||
## Terms
|
||||
PeerFinder is a self contained module that provides these services, along with some
|
||||
additional overlay network management services such as _fixed slots_ and _cluster
|
||||
slots_.
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td>Bootstrap</td>
|
||||
<td>The process by which a Ripple peer obtains the initial set of
|
||||
connections into the Ripple payment network overlay.
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Overlay</td>
|
||||
<td>The connected graph of Ripple peers, overlaid on the public Internet.
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Peer</td>
|
||||
<td>A network server running the **rippled** daemon.
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
## Features
|
||||
|
||||
### Exposition
|
||||
PeerFinder has these responsibilities
|
||||
|
||||
(Formerly in Manager.cpp, needs to be reformatted and tidied)
|
||||
* Maintain a persistent set of endpoint addresses suitable for bootstrapping
|
||||
into the peer to peer overlay, ranked by relative locally observed utility.
|
||||
|
||||
PeerFinder
|
||||
----------
|
||||
* Send and receive protocol messages for discovery of endpoint addresses.
|
||||
|
||||
Implements the logic for announcing and discovering IP addresses for
|
||||
connecting into the Ripple network.
|
||||
* Provide endpoint addresses to new peers that need them.
|
||||
|
||||
Introduction
|
||||
------------
|
||||
* Maintain connections to a configured set of fixed peers.
|
||||
|
||||
Each Peer (a computer running rippled) on the Ripple network requires a certain
|
||||
number of connections to other peers. These connections form an "overlay
|
||||
network." When a new peer wants to join the network, they need a robust source
|
||||
of network addresses (IP addresses) in order to establish outgoing connections.
|
||||
Once they have joined the network, they need a method of announcing their
|
||||
availability of accepting incoming connections.
|
||||
* Impose limits on the various slots consumed by peer connections.
|
||||
|
||||
The Ripple network, like all peer to peer networks, defines a "directed graph"
|
||||
where each node represents a computer running the rippled software, and each
|
||||
edge indicates a network connection. The direction of the connection tells
|
||||
us whether it is an outbound or inbound connection (from the perspective of
|
||||
a particular node).
|
||||
* Initiate outgoing connection attempts to endpoint addresses to maintain the
|
||||
overlay connectivity and fixed peer policies.
|
||||
|
||||
Fact #1:
|
||||
The total inbound and outbound connections of any overlay must be equal.
|
||||
* Verify the connectivity of neighbors who advertise inbound connection slots.
|
||||
|
||||
This follows that for each node that has an established outbound connection,
|
||||
there must exist another node that has received the corresponding inbound
|
||||
connection.
|
||||
* Prevent duplicate connections and connections to self.
|
||||
|
||||
When a new peer joins the network it may or may not wish to receive inbound
|
||||
connections. Some peers are unable to accept incoming connections for various reasons.
|
||||
For security reasons they may be behind a firewall that blocks accept requests.
|
||||
The administrators may decide they don't want the connection traffic. Or they
|
||||
may wish to connect only to specific peers. Or they may simply be misconfigured.
|
||||
---
|
||||
|
||||
If a peer decides that it wishes to receive incoming connections, it needs
|
||||
a method to announce its IP address and port number, the features that it
|
||||
offers (for example, that it also services client requests), and the number
|
||||
of available connection slots. This is to handle the case where the peer
|
||||
reaches its desired number of peer connections, but may still want to inform
|
||||
the network that it will service clients. It may also be desired to indicate
|
||||
the number of free client slots.
|
||||
# Concepts
|
||||
|
||||
Once a peer is connected to the network we need a way both to inform our
|
||||
neighbors of our status with respect to accepting connections, and also to
|
||||
learn about new fresh addresses to connect to. For this we will define the
|
||||
mtENDPOINTS message.
|
||||
## Manager
|
||||
|
||||
"Bootstrap Strategy"
|
||||
--------------------
|
||||
The `Manager` is an application singleton which provides the primary interface
|
||||
to interaction with the PeerFinder.
|
||||
|
||||
Fresh endpoints are ones we have seen recently via mtENDPOINTS.
|
||||
These are best to give out to someone who needs additional
|
||||
connections as quickly as possible, since it is very likely
|
||||
that the fresh endpoints have open incoming slots.
|
||||
### Autoconnect
|
||||
|
||||
Reliable endpoints are ones which are highly likely to be
|
||||
connectible over long periods of time. They might not necessarily
|
||||
have an incoming slot, but they are good for bootstrapping when
|
||||
there are no peers yet. Typically these are what we would want
|
||||
to store in a database or local config file for a future launch.
|
||||
The Autoconnect feature of PeerFinder automatically establishes outgoing
|
||||
connections using addresses learned from various sources including the
|
||||
configuration file, the result of domain name lookups, and messages received
|
||||
from the overlay itself.
|
||||
|
||||
Nouns:
|
||||
### Callback
|
||||
|
||||
bootstrap_ip
|
||||
numeric IPAddress
|
||||
PeerFinder is an isolated code module with few external dependencies. To perform
|
||||
socket specific activities such as establishing outgoing connections or sending
|
||||
messages to connected peers, the Manager is constructed with an abstract
|
||||
interface called the `Callback`. An instance of this interface performs the
|
||||
actual required operations, making PeerFinder independent of the calling code.
|
||||
|
||||
bootstrap_domain
|
||||
domain name / port combinations, resolution only
|
||||
### Config
|
||||
|
||||
bootstrap_url
|
||||
URL leading to a text file, with a series of entries.
|
||||
The `Config` structure defines the operational parameters of the PeerFinder.
|
||||
Some values come from the configuration file while others are calculated via
|
||||
tuned heuristics. The fields are as follows:
|
||||
|
||||
ripple.txt
|
||||
Separately parsed entity outside of PeerFinder that can provide
|
||||
bootstrap_ip, bootstrap_domain, and bootstrap_url items.
|
||||
* `autoConnect`
|
||||
|
||||
The process of obtaining the initial peer connections for accessing the Ripple
|
||||
peer to peer network, when there are no current connections, is called
|
||||
"bootstrapping." The algorithm is as follows:
|
||||
A flag indicating whether or not the Autoconnect feature is enabled.
|
||||
|
||||
1. If ( unusedLiveEndpoints.count() > 0
|
||||
OR activeConnectionAttempts.count() > 0)
|
||||
Try addresses from unusedLiveEndpoints
|
||||
return;
|
||||
2. If ( domainNames.count() > 0 AND (
|
||||
unusedBootstrapIPs.count() == 0
|
||||
OR activeNameResolutions.count() > 0) )
|
||||
ForOneOrMore (DomainName that hasn't been resolved recently)
|
||||
Contact DomainName and add entries to the unusedBootstrapIPs
|
||||
return;
|
||||
3. If (unusedBootstrapIPs.count() > 0)
|
||||
Try addresses from unusedBootstrapIPs
|
||||
return;
|
||||
4. Try entries from [ips]
|
||||
5. Try entries from [ips_urls]
|
||||
6. Increment generation number and go to 1
|
||||
* `wantIncoming`
|
||||
|
||||
- Keep a map of all current outgoing connection attempts
|
||||
A flag indicating whether or not the peer desires inbound connections. When
|
||||
this flag is turned off, a peer will not advertise itself in Endpoint
|
||||
messages.
|
||||
|
||||
"Connection Strategy"
|
||||
---------------------
|
||||
* `listeningPort`
|
||||
|
||||
This is the overall strategy a peer uses to maintain its position in the Ripple
|
||||
network graph
|
||||
The port number to use when creating the listening socket for peer
|
||||
connections.
|
||||
|
||||
We define these values:
|
||||
* `maxPeers`
|
||||
|
||||
peerCount (calculated)
|
||||
The number of currently connected and established peers
|
||||
The largest number of active peer connections to allow. This includes inbound
|
||||
and outbound connections, but excludes fixed and cluster peers. There is an
|
||||
implementation defined floor on this value.
|
||||
|
||||
outCount (calculated)
|
||||
The number of peers in PeerCount that are outbound connections.
|
||||
* `outPeers`
|
||||
|
||||
MinOutCount (hard-coded constant)
|
||||
The minimum number of OutCount we want. This also puts a floor
|
||||
on PeerCount. This protects against sybil attacks and makes
|
||||
sure that ledgers can get retrieved reliably.
|
||||
10 is the proposed value.
|
||||
The number of automatic outbound connections that PeerFinder will maintain
|
||||
when the Autoconnect feature is enabled. The value is computed with fractional
|
||||
precision as an implementation defined percentage of `maxPeers` subject to
|
||||
an implementation defined floor. An instance of the PeerFinder rounds the
|
||||
fractional part up or down using a uniform random number generated at
|
||||
program startup. This allows the outdegree of the overlay network to be
|
||||
controlled with fractional precision, ensuring that all inbound network
|
||||
connection slots are not consumed (which would make it difficult for new
|
||||
participants to enter the network).
|
||||
|
||||
MaxPeerCount (a constant set in the rippled.cfg)
|
||||
The maximum number of peer connections, inbound or outbound,
|
||||
that a peer wishes to maintain. Setting MaxPeerCount equal to
|
||||
or below MinOutCount would disallow incoming connections.
|
||||
Here's an example of how the network might be structured with a fractional
|
||||
value for outPeers:
|
||||
|
||||
OutPercent (a baked-in program constant for now)
|
||||
The peer's target value for OutCount. When the value of OutCount
|
||||
is below this number, the peer will employ the Outgoing Strategy
|
||||
to raise its value of OutCount. This value is initially a constant
|
||||
in the program, defined by the developers. However, it
|
||||
may be changed through the consensus process.
|
||||
15% is a proposed value.
|
||||
|
||||
However, lets consider the case where OutDesired is exactly equal to MaxPeerCount / 2.
|
||||
In this case, a stable state will be reached when every peer is full, and
|
||||
has exactly the same number of inbound and outbound connections. The problem
|
||||
here is that there are now no available incoming connection slots. No new
|
||||
peers can enter the network.
|
||||
|
||||
Lets consider the case where OutDesired is exactly equal to (MaxPeerCount / 2) - 1.
|
||||
The stable state for this network (assuming all peers can accept incoming) will
|
||||
leave us with network degree equal to MaxPeerCount - 2, with all peers having two
|
||||
available incoming connection slots. The global number of incoming connection slots
|
||||
will be equal to twice the number of nodes on the network. While this might seem to
|
||||
be a desirable outcome, note that the connectedness (degree of the overlay) plays
|
||||
a large part in determining the levels of traffic and ability to receive validations
|
||||
from desired nodes. Having every node with available incoming connections also
|
||||
means that entries in pong caches will continually fall out with new values and
|
||||
information will become less useful.
|
||||
|
||||
For this reason, we advise that the value of OutDesired be fractional. Upon startup,
|
||||
a node will use its node ID (its 160 bit unique ID) to decide whether to round the
|
||||
value of OutDesired up or down. Using this method, we can precisely control the
|
||||
global number of available incoming connection slots.
|
||||
|
||||
"Outgoing Strategy"
|
||||
-------------------
|
||||
|
||||
This is the method a peer uses to establish outgoing connections into the
|
||||
Ripple network.
|
||||
|
||||
A peer whose PeerCount is zero will use these steps:
|
||||
1. Attempt addresses from a local database of addresses
|
||||
2. Attempt addresses from a set of "well known" domains in rippled.cfg
|
||||
**(Need example here)**
|
||||
|
||||
|
||||
This is the method used by a peer that is already connected to the Ripple network,
|
||||
to adjust the number of outgoing connections it is maintaining.
|
||||
|
||||
### Livecache
|
||||
|
||||
"Incoming Strategy"
|
||||
------------------------------
|
||||
The Livecache holds relayed IP addresses that have been received recently in
|
||||
the form of Endpoint messages via the peer to peer overlay. A peer periodically
|
||||
broadcasts the Endpoint message to its neighbors when it has open inbound
|
||||
connection slots. Peers store these messages in the Livecache and periodically
|
||||
forward their neighbors a handful of random entries from their Livecache, with
|
||||
an incremented hop count for each forwarded entry.
|
||||
|
||||
This is the method used by a peer to announce its ability and desire to receive
|
||||
incoming connections both for the purpose of obtaining additional peer connections
|
||||
and also for receiving requests from clients.
|
||||
The algorithm for sending a neighbor a set of Endpoint messages chooses evenly
|
||||
from all available hop counts on each send. This ensures that each peer
|
||||
will see some entries with the farthest hops at each iteration. The result is
|
||||
to expand a peer's horizon with respect to which overlay endpoints are visible.
|
||||
This is designed to force the overlay to become highly connected and reduce
|
||||
the network diameter with each connection establishment.
|
||||
|
||||
Overlay Network
|
||||
http://en.wikipedia.org/wiki/Overlay_network
|
||||
When a peer receives an Endpoint message that originates from a neighbor
|
||||
(identified by a hop count of zero) for the first time, it performs an incoming
|
||||
connection test on that neighbor by initiating an outgoing connection to the
|
||||
remote IP address as seen on the connection combined with the port advertised
|
||||
in the Endpoint message. If the test fails, then the peer considers its neighbor
|
||||
firewalled (intentionally or due to misconfiguration) and no longer forwards
|
||||
Endpoint messages for that peer. This prevents poor quality unconnectible
|
||||
addresses from landing in the caches. If the incoming connection test passes,
|
||||
then the peer fills in the Endpoint message with the remote address as seen on
|
||||
the connection before storing it in its cache and forwarding it to other peers.
|
||||
This relieves the neighbor from the responsibility of knowing its own IP address
|
||||
before it can start receiving incoming connections.
|
||||
|
||||
Directed Graph
|
||||
http://en.wikipedia.org/wiki/Directed_graph
|
||||
Livecache entries expire quickly. Since a peer stops advertising itself when
|
||||
it no longer has available inbound slots, its address will shortly thereafter stop
|
||||
being handed out by other peers. Livecache entries are very likely to result
|
||||
in both a successful connection establishment and the acquisition of an active
|
||||
outbound slot. Compare this with Bootcache addresses, which are very likely to
|
||||
be connectible but unlikely to have an open slot.
|
||||
|
||||
References:
|
||||
Because entries in the Livecache are ephemeral, they are not persisted across
|
||||
launches in the database. The Livecache is continually updated and expired as
|
||||
Endpoint messages are received from the overlay over time.
|
||||
|
||||
Gnutella 0.6 Protocol
|
||||
2.2.2 Ping (0x00)
|
||||
2.2.3 Pong (0x01)
|
||||
2.2.4 Use of Ping and Pong messages
|
||||
2.2.4.1 A simple pong caching scheme
|
||||
2.2.4.2 Other pong caching schemes
|
||||
http://rfc-gnutella.sourceforge.net/src/rfc-0_6-draft.html
|
||||
### Bootcache
|
||||
|
||||
Revised Gnutella Ping Pong Scheme
|
||||
By Christopher Rohrs and Vincent Falco
|
||||
http://rfc-gnutella.sourceforge.net/src/pong-caching.html
|
||||
The `Bootcache` stores IP addresses useful for gaining initial connections.
|
||||
Each address is associated with the following metadata:
|
||||
|
||||
* **Uptime**
|
||||
|
||||
The number of seconds that the address has maintained an active
|
||||
peer connection, cumulative, without a connection attempt failure.
|
||||
|
||||
* **Valence**
|
||||
|
||||
A signed integer which represents the number of successful
|
||||
consecutive connection attempts when positive, and the number of
|
||||
failed consecutive connection attempts when negative. If an outgoing
|
||||
connection attempt to the corresponding IP address fails to complete the
|
||||
handshake, the valence is reset to negative one, and all accrued uptime is
|
||||
reset to zero. This harsh penalty is intended to prevent popular servers
|
||||
from forever remaining top ranked in all peer databases.
|
||||
|
||||
When choosing addresses from the boot cache for the purpose of
|
||||
establishing outgoing connections, addresses are ranked in decreasing
|
||||
order of high uptime, with valence as the tie breaker. The Bootcache is
|
||||
persistent. Entries are periodically inserted and updated in the corresponding
|
||||
SQLite database during program operation. When **rippled** is launched, the
|
||||
existing Bootcache database data is accessed and loaded to accelerate the
|
||||
bootstrap process.
|
||||
|
||||
Desirable entries in the Bootcache are addresses for servers which are known to
|
||||
have high uptimes, and for which connection attempts usually succeed. However,
|
||||
these servers do not necessarily have available inbound connection slots.
|
||||
However, it is assured that these servers will have a well populated Livecache
|
||||
since they will have moved towards the core of the overlay over their high
|
||||
uptime. When a connected server is full it will return a handful of new
|
||||
addresses from its Livecache and gracefully close the connection. Addresses
|
||||
from the Livecache are highly likely to have inbound connection slots and be
|
||||
connectible.
|
||||
|
||||
For security, all information that contributes to the ranking of Bootcache
|
||||
entries is observed locally. PeerFinder never trusts external sources of information.
|
||||
|
||||
### Slot
|
||||
|
||||
Each TCP/IP socket that can participate in the peer to peer overlay occupies
|
||||
a slot. Slots have properties and state associated with them:
|
||||
|
||||
#### State (Slot)
|
||||
|
||||
The slot state represents the current stage of the connection as it passes
|
||||
through the business logic for establishing peer connections.
|
||||
|
||||
* `accept`
|
||||
|
||||
The accept state is an initial state resulting from accepting an incoming
|
||||
connection request on a listening socket. The remote IP address and port
|
||||
are known, and a handshake is expected next.
|
||||
|
||||
* `connect`
|
||||
|
||||
The connect state is an initial state used when actively establishing outbound
|
||||
connection attempts. The desired remote IP address and port are known.
|
||||
|
||||
* `connected`
|
||||
|
||||
When an outbound connection attempt succeeds, it moves to the connected state.
|
||||
The handshake is initiated but not completed.
|
||||
|
||||
* `active`
|
||||
|
||||
The state becomes Active when a connection in either the Accepted or Connected
|
||||
state completes the handshake process, and a slot is available based on the
|
||||
properties. If no slot is available when the handshake completes, the socket
|
||||
is gracefully closed.
|
||||
|
||||
* `closing`
|
||||
|
||||
The Closing state represents a connected socket in the process of being
|
||||
gracefully closed.
|
||||
|
||||
#### Properties (Slot)
|
||||
|
||||
Slot properties may be combined and are not mutually exclusive.
|
||||
|
||||
* **Inbound**
|
||||
|
||||
An inbound slot is the condition of a socket which has accepted an incoming
|
||||
connection request. A connection which is not inbound is by definition
|
||||
outbound.
|
||||
|
||||
* **Fixed**
|
||||
|
||||
A fixed slot is a desired connection to a known peer identified by IP address,
|
||||
usually entered manually in the configuration file. For the purpose of
|
||||
establishing outbound connections, the peer also has an associated port number
|
||||
although only the IP address is checked to determine if the fixed peer is
|
||||
already connected. Fixed slots do not count towards connection limits.
|
||||
|
||||
* **Cluster**
|
||||
|
||||
A cluster slot is a connection which has completed the handshake stage, whose
|
||||
public key matches a known public key usually entered manually in the
|
||||
configuration file or learned through overlay messages from other trusted
|
||||
peers. Cluster slots do not count towards connection limits.
|
||||
|
||||
* **Superpeer** (2.0)
|
||||
|
||||
A superpeer slot is a connection to a peer which can accept incoming
|
||||
connections, meets certain resource availability requirements (such as
|
||||
bandwidth, CPU, and storage capacity), and operates full duplex in the
|
||||
overlay. Connections which are not superpeers are by definition leaves. A
|
||||
leaf slot is a connection to a peer which does not route overlay messages to
|
||||
other peers, and operates in a partial half duplex fashion in the overlay.
|
||||
|
||||
#### Fixed Slots
|
||||
|
||||
Fixed slots are identified by IP address and set up during the initialization
|
||||
of the Manager, usually from the configuration file. The Logic will always make
|
||||
outgoing connection attempts to each fixed slot which is not currently
|
||||
connected. If we receive an inbound connection from an endpoint whose address
|
||||
portion (without port) matches a fixed slot address, we consider the fixed
|
||||
slot to be connected.
|
||||
|
||||
#### Cluster Slots
|
||||
|
||||
Cluster slots are identified by the public key and set up during the
|
||||
initialization of the manager or discovered upon receipt of messages in the
|
||||
overlay from trusted connections.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
# Algorithms
|
||||
|
||||
## Connection Strategy
|
||||
|
||||
The _Connection Strategy_ applies the configuration settings to establish
|
||||
desired outbound connections. It runs periodically and progresses through a
|
||||
series of stages, remaining in each stage until a condition is met.
|
||||
|
||||
### Stage 1: Fixed Slots
|
||||
|
||||
This stage is invoked when the number of active fixed connections is below the
|
||||
number of fixed connections specified in the configuration, and one of the
|
||||
following is true:
|
||||
|
||||
* There are eligible fixed addresses to try
|
||||
* Any outbound connection attempts are in progress
|
||||
|
||||
Each fixed address is associated with a retry timer. On a fixed connection
|
||||
failure, the timer is reset so that the address is not tried for some amount
|
||||
of time, which increases according to a scheduled sequence up to some maximum
|
||||
which is currently set to approximately one hour between retries. A fixed
|
||||
address is considered eligible if we are not currently connected or attempting
|
||||
the address, and its retry timer has expired.
|
||||
|
||||
The PeerFinder makes its best effort to become fully connected to the fixed
|
||||
addresses specified in the configuration file before moving on to establish
|
||||
outgoing connections to foreign peers. This security feature helps rippled
|
||||
establish itself with a trusted set of peers first before accepting untrusted
|
||||
data from the network.
|
||||
|
||||
### Stage 2: Livecache
|
||||
|
||||
The Livecache is invoked when Stage 1 is not active, autoconnect is enabled,
|
||||
and the number of active outbound connections is below the number desired. The
|
||||
stage remains active while:
|
||||
|
||||
* The Livecache has addresses to try
|
||||
* Any outbound connection attempts are in progress
|
||||
|
||||
PeerFinder makes its best effort to exhaust addresses in the Livecache before
|
||||
moving on to the Bootcache, because Livecache addresses are highly likely
|
||||
to be connectible (since they are known to have been online within the last
|
||||
minute), and highly likely to have an open slot for an incoming connection
|
||||
(because peers only advertise themselves in the Livecache when they have
|
||||
open slots).
|
||||
|
||||
### Stage 3: Bootcache
|
||||
|
||||
The Bootcache is invoked when Stage 1 and Stage 2 are not active, autoconnect
|
||||
is enabled, and the number of active outbound connections is below the number
|
||||
desired. The stage remains active while:
|
||||
|
||||
* There are addresses in the cache that have not been tried recently.
|
||||
|
||||
Entries in the Bootcache are ranked, with high uptime and highly connectible
|
||||
addresses preferred over others. Connection attempts to Bootcache addresses
|
||||
are very likely to succeed but unlikely to produce an active connection since
|
||||
the peers likely do not have open slots. Before the remote peer closes the
|
||||
connection it will send a handful of addresses from its Livecache to help the
|
||||
new peer coming online obtain connections.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
# References
|
||||
|
||||
Much of the work in PeerFinder was inspired by earlier work in Gnutella:
|
||||
|
||||
[Revised Gnutella Ping Pong Scheme](http://rfc-gnutella.sourceforge.net/src/pong-caching.html)<br>
|
||||
_By Christopher Rohrs and Vincent Falco_
|
||||
|
||||
[Gnutella 0.6 Protocol:](http://rfc-gnutella.sourceforge.net/src/rfc-0_6-draft.html) Sections:
|
||||
* 2.2.2 Ping (0x00)
|
||||
* 2.2.3 Pong (0x01)
|
||||
* 2.2.4 Use of Ping and Pong messages
|
||||
* 2.2.4.1 A simple pong caching scheme
|
||||
* 2.2.4.2 Other pong caching schemes
|
||||
|
||||
[overlay_network]: http://en.wikipedia.org/wiki/Overlay_network
|
||||
|
||||
127
src/ripple/peerfinder/README_WIP.md
Normal file
127
src/ripple/peerfinder/README_WIP.md
Normal file
@@ -0,0 +1,127 @@
|
||||
## Bootstrap Strategy
|
||||
|
||||
Fresh endpoints are ones we have seen recently via mtENDPOINTS.
|
||||
These are best to give out to someone who needs additional
|
||||
connections as quickly as possible, since it is very likely
|
||||
that the fresh endpoints have open incoming slots.
|
||||
|
||||
Reliable endpoints are ones which are highly likely to be
|
||||
connectible over long periods of time. They might not necessarily
|
||||
have an incoming slot, but they are good for bootstrapping when
|
||||
there are no peers yet. Typically these are what we would want
|
||||
to store in a database or local config file for a future launch.
|
||||
|
||||
Nouns:
|
||||
|
||||
bootstrap_ip
|
||||
numeric IPAddress
|
||||
|
||||
bootstrap_domain
|
||||
domain name / port combinations, resolution only
|
||||
|
||||
bootstrap_url
|
||||
URL leading to a text file, with a series of entries.
|
||||
|
||||
ripple.txt
|
||||
Separately parsed entity outside of PeerFinder that can provide
|
||||
bootstrap_ip, bootstrap_domain, and bootstrap_url items.
|
||||
|
||||
The process of obtaining the initial peer connections for accessing the Ripple
|
||||
peer to peer network, when there are no current connections, is called
|
||||
"bootstrapping." The algorithm is as follows:
|
||||
|
||||
1. If ( unusedLiveEndpoints.count() > 0
|
||||
OR activeConnectionAttempts.count() > 0)
|
||||
Try addresses from unusedLiveEndpoints
|
||||
return;
|
||||
2. If ( domainNames.count() > 0 AND (
|
||||
unusedBootstrapIPs.count() == 0
|
||||
OR activeNameResolutions.count() > 0) )
|
||||
ForOneOrMore (DomainName that hasn't been resolved recently)
|
||||
Contact DomainName and add entries to the unusedBootstrapIPs
|
||||
return;
|
||||
3. If (unusedBootstrapIPs.count() > 0)
|
||||
Try addresses from unusedBootstrapIPs
|
||||
return;
|
||||
4. Try entries from [ips]
|
||||
5. Try entries from [ips_urls]
|
||||
6. Increment generation number and go to 1
|
||||
|
||||
- Keep a map of all current outgoing connection attempts
|
||||
|
||||
"Connection Strategy"
|
||||
---------------------
|
||||
|
||||
This is the overall strategy a peer uses to maintain its position in the Ripple
|
||||
network graph
|
||||
|
||||
We define these values:
|
||||
|
||||
peerCount (calculated)
|
||||
The number of currently connected and established peers
|
||||
|
||||
outCount (calculated)
|
||||
The number of peers in PeerCount that are outbound connections.
|
||||
|
||||
MinOutCount (hard-coded constant)
|
||||
The minimum number of OutCount we want. This also puts a floor
|
||||
on PeerCount. This protects against sybil attacks and makes
|
||||
sure that ledgers can get retrieved reliably.
|
||||
10 is the proposed value.
|
||||
|
||||
MaxPeerCount (a constant set in the rippled.cfg)
|
||||
The maximum number of peer connections, inbound or outbound,
|
||||
that a peer wishes to maintain. Setting MaxPeerCount equal to
|
||||
or below MinOutCount would disallow incoming connections.
|
||||
|
||||
OutPercent (a baked-in program constant for now)
|
||||
The peer's target value for OutCount. When the value of OutCount
|
||||
is below this number, the peer will employ the Outgoing Strategy
|
||||
to raise its value of OutCount. This value is initially a constant
|
||||
in the program, defined by the developers. However, it
|
||||
may be changed through the consensus process.
|
||||
15% is a proposed value.
|
||||
|
||||
However, let's consider the case where OutDesired is exactly equal to MaxPeerCount / 2.
|
||||
In this case, a stable state will be reached when every peer is full, and
|
||||
has exactly the same number of inbound and outbound connections. The problem
|
||||
here is that there are now no available incoming connection slots. No new
|
||||
peers can enter the network.
|
||||
|
||||
Let's consider the case where OutDesired is exactly equal to (MaxPeerCount / 2) - 1.
|
||||
The stable state for this network (assuming all peers can accept incoming) will
|
||||
leave us with network degree equal to MaxPeerCount - 2, with all peers having two
|
||||
available incoming connection slots. The global number of incoming connection slots
|
||||
will be equal to twice the number of nodes on the network. While this might seem to
|
||||
be a desirable outcome, note that the connectedness (degree of the overlay) plays
|
||||
a large part in determining the levels of traffic and ability to receive validations
|
||||
from desired nodes. Having every node with available incoming connections also
|
||||
means that entries in pong caches will continually fall out with new values and
|
||||
information will become less useful.
|
||||
|
||||
For this reason, we advise that the value of OutDesired be fractional. Upon startup,
|
||||
a node will use its node ID (its 160 bit unique ID) to decide whether to round the
|
||||
value of OutDesired up or down. Using this method, we can precisely control the
|
||||
global number of available incoming connection slots.
|
||||
|
||||
"Outgoing Strategy"
|
||||
-------------------
|
||||
|
||||
This is the method a peer uses to establish outgoing connections into the
|
||||
Ripple network.
|
||||
|
||||
A peer whose PeerCount is zero will use these steps:
|
||||
1. Attempt addresses from a local database of addresses
|
||||
2. Attempt addresses from a set of "well known" domains in rippled.cfg
|
||||
|
||||
|
||||
This is the method used by a peer that is already connected to the Ripple network,
|
||||
to adjust the number of outgoing connections it is maintaining.
|
||||
|
||||
|
||||
"Incoming Strategy"
|
||||
------------------------------
|
||||
|
||||
This is the method used by a peer to announce its ability and desire to receive
|
||||
incoming connections both for the purpose of obtaining additional peer connections
|
||||
and also for receiving requests from clients.
|
||||
@@ -31,21 +31,18 @@ namespace PeerFinder {
|
||||
struct Callback
|
||||
{
|
||||
/** Initiate outgoing Peer connections to the specified set of endpoints. */
|
||||
virtual void connectPeers (IPAddresses const& addresses) = 0;
|
||||
virtual void connect (IPAddresses const& addresses) = 0;
|
||||
|
||||
/** Activate the handshaked peer with the specified address. */
|
||||
virtual void activate (Slot::ptr const& slot) = 0;
|
||||
|
||||
/** Sends a set of Endpoint records to the specified peer. */
|
||||
virtual void send (Slot::ptr const& slot, Endpoints const& endpoints) = 0;
|
||||
|
||||
/** Disconnect the handshaked peer with the specified address.
|
||||
@param graceful `true` to wait for send buffers to drain before closing.
|
||||
*/
|
||||
virtual void disconnectPeer (
|
||||
IPAddress const& remote_address, bool graceful) = 0;
|
||||
|
||||
/** Activate the handshaked peer with the specified address. */
|
||||
virtual void activatePeer (
|
||||
IPAddress const& remote_address) = 0;
|
||||
|
||||
/** Sends a set of Endpoint records to the specified peer. */
|
||||
virtual void sendEndpoints (IPAddress const& remote_address,
|
||||
Endpoints const& endpoints) = 0;
|
||||
virtual void disconnect (Slot::ptr const& slot, bool graceful) = 0;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
@@ -20,6 +20,9 @@
|
||||
#ifndef RIPPLE_PEERFINDER_MANAGER_H_INCLUDED
|
||||
#define RIPPLE_PEERFINDER_MANAGER_H_INCLUDED
|
||||
|
||||
#include "Slot.h"
|
||||
#include "Types.h"
|
||||
|
||||
namespace ripple {
|
||||
namespace PeerFinder {
|
||||
|
||||
@@ -37,6 +40,7 @@ public:
|
||||
Stoppable& parent,
|
||||
SiteFiles::Manager& siteFiles,
|
||||
Callback& callback,
|
||||
clock_type& clock,
|
||||
Journal journal);
|
||||
|
||||
/** Destroy the object.
|
||||
@@ -70,45 +74,52 @@ public:
|
||||
/** Add a URL as a fallback location to obtain IPAddress sources.
|
||||
@param name A label used for diagnostics.
|
||||
*/
|
||||
/* VFALCO NOTE Unimplemented
|
||||
virtual void addFallbackURL (std::string const& name,
|
||||
std::string const& url) = 0;
|
||||
*/
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
/** Called when a peer connection is accepted. */
|
||||
virtual void onPeerAccept (IPAddress const& local_address,
|
||||
IPAddress const& remote_address) = 0;
|
||||
|
||||
/** Called when an outgoing peer connection is attempted. */
|
||||
virtual void onPeerConnect (IPAddress const& address) = 0;
|
||||
|
||||
/** Called when an outgoing peer connection attempt succeeds. */
|
||||
virtual void onPeerConnected (IPAddress const& local_address,
|
||||
IPAddress const& remote_address) = 0;
|
||||
|
||||
/** Called when the real public address is discovered.
|
||||
Currently this happens when we receive a PROXY handshake. The
|
||||
protocol HELLO message will happen after the PROXY handshake.
|
||||
/** Create a new inbound slot with the specified remote endpoint.
|
||||
If nullptr is returned, then the slot could not be assigned.
|
||||
Usually this is because of a detected self-connection.
|
||||
*/
|
||||
virtual void onPeerAddressChanged (
|
||||
IPAddress const& currentAddress, IPAddress const& newAddress) = 0;
|
||||
virtual Slot::ptr new_inbound_slot (
|
||||
IP::Endpoint const& local_endpoint,
|
||||
IP::Endpoint const& remote_endpoint) = 0;
|
||||
|
||||
/** Called when a peer connection finishes the protocol handshake.
|
||||
@param id The node public key of the peer.
|
||||
@param inCluster The peer is a member of our cluster.
|
||||
/** Create a new outbound slot with the specified remote endpoint.
|
||||
If nullptr is returned, then the slot could not be assigned.
|
||||
Usually this is because of a duplicate connection.
|
||||
*/
|
||||
virtual void onPeerHandshake (
|
||||
IPAddress const& address, PeerID const& id, bool inCluster) = 0;
|
||||
virtual Slot::ptr new_outbound_slot (
|
||||
IP::Endpoint const& remote_endpoint) = 0;
|
||||
|
||||
/** Always called when the socket closes. */
|
||||
virtual void onPeerClosed (IPAddress const& address) = 0;
|
||||
/** Called when an outbound connection attempt succeeds.
|
||||
The local endpoint must be valid. If the caller receives an error
|
||||
when retrieving the local endpoint from the socket, it should
|
||||
proceed as if the connection attempt failed by calling on_closed
|
||||
instead of on_connected.
|
||||
*/
|
||||
virtual void on_connected (Slot::ptr const& slot,
|
||||
IP::Endpoint const& local_endpoint) = 0;
|
||||
|
||||
/** Called when a handshake is completed. */
|
||||
virtual void on_handshake (Slot::ptr const& slot,
|
||||
RipplePublicKey const& key, bool cluster) = 0;
|
||||
|
||||
/** Called when mtENDPOINTS is received. */
|
||||
virtual void onPeerEndpoints (IPAddress const& address,
|
||||
virtual void on_endpoints (Slot::ptr const& slot,
|
||||
Endpoints const& endpoints) = 0;
|
||||
|
||||
/** Called when legacy IP/port addresses are received. */
|
||||
virtual void onLegacyEndpoints (IPAddresses const& addresses) = 0;
|
||||
virtual void on_legacy_endpoints (IPAddresses const& addresses) = 0;
|
||||
|
||||
/** Called when the slot is closed.
|
||||
This always happens when the socket is closed.
|
||||
*/
|
||||
virtual void on_closed (Slot::ptr const& slot) = 0;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
81
src/ripple/peerfinder/api/Slot.h
Normal file
81
src/ripple/peerfinder/api/Slot.h
Normal file
@@ -0,0 +1,81 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
Copyright (c) 2012, 2013 Ripple Labs Inc.
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef RIPPLE_PEERFINDER_SLOT_H_INCLUDED
|
||||
#define RIPPLE_PEERFINDER_SLOT_H_INCLUDED
|
||||
|
||||
#include "../../beast/beast/net/IPEndpoint.h"
|
||||
|
||||
#include <boost/optional.hpp>
|
||||
|
||||
#include <memory>
|
||||
|
||||
namespace ripple {
|
||||
namespace PeerFinder {
|
||||
|
||||
/** Properties and state associated with a peer to peer overlay connection. */
|
||||
class Slot
|
||||
{
|
||||
public:
|
||||
// Shared ownership handle used throughout PeerFinder to refer to a slot.
typedef std::shared_ptr <Slot> ptr;
|
||||
|
||||
// Lifecycle of a slot. A slot starts in either `accept` (inbound) or
// `connect` (outbound) and advances toward `active` or `closing`.
enum State
|
||||
{
|
||||
accept,     // inbound connection accepted; handshake expected next
|
||||
connect,    // outbound connection attempt in progress
|
||||
connected,  // outbound attempt succeeded; handshake not yet complete
|
||||
active,     // handshake completed and a slot was available
|
||||
closing     // connected socket being gracefully closed
|
||||
};
|
||||
|
||||
// Pure virtual destructor: Slot is an abstract interface and is only
// destroyed through derived implementations (e.g. SlotImp).
virtual ~Slot () = 0;
|
||||
|
||||
/** Returns `true` if this is an inbound connection. */
|
||||
virtual bool inbound () const = 0;
|
||||
|
||||
/** Returns `true` if this is a fixed connection.
|
||||
A connection is fixed if its remote endpoint is in the list of
|
||||
remote endpoints for fixed connections.
|
||||
*/
|
||||
virtual bool fixed () const = 0;
|
||||
|
||||
/** Returns `true` if this is a cluster connection.
|
||||
This is only known after the handshake completes.
|
||||
*/
|
||||
virtual bool cluster () const = 0;
|
||||
|
||||
/** Returns the state of the connection. */
|
||||
virtual State state () const = 0;
|
||||
|
||||
/** The remote endpoint of the socket. */
|
||||
virtual IP::Endpoint const& remote_endpoint () const = 0;
|
||||
|
||||
/** The local endpoint of the socket, when known. */
|
||||
virtual boost::optional <IP::Endpoint> const& local_endpoint () const = 0;
|
||||
|
||||
/** The peer's public key, when known.
|
||||
The public key is established when the handshake is complete.
|
||||
*/
|
||||
virtual boost::optional <RipplePublicKey> const& public_key () const = 0;
|
||||
};
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -20,18 +20,19 @@
|
||||
#ifndef RIPPLE_PEERFINDER_TYPES_H_INCLUDED
|
||||
#define RIPPLE_PEERFINDER_TYPES_H_INCLUDED
|
||||
|
||||
#include "beast/beast/chrono/abstract_clock.h"
|
||||
|
||||
namespace ripple {
|
||||
namespace PeerFinder {
|
||||
|
||||
/** Used to identify peers. */
|
||||
typedef RipplePublicKey PeerID;
|
||||
|
||||
/** Represents a set of addresses. */
|
||||
typedef std::vector <IPAddress> IPAddresses;
|
||||
|
||||
/** A set of Endpoint used for connecting. */
|
||||
typedef std::vector <Endpoint> Endpoints;
|
||||
|
||||
typedef beast::abstract_clock <std::chrono::seconds> clock_type;
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -24,9 +24,25 @@ namespace ripple {
|
||||
namespace PeerFinder {
|
||||
|
||||
/** Stores IP addresses useful for gaining initial connections.
|
||||
Ideal bootstrap addresses have the following attributes:
|
||||
- High uptime
|
||||
- Many successful connect attempts
|
||||
|
||||
This is one of the caches that is consulted when additional outgoing
|
||||
connections are needed. Along with the address, each entry has this
|
||||
additional metadata:
|
||||
|
||||
Uptime
|
||||
|
||||
The number of seconds that the address has maintained an active
|
||||
peer connection, cumulative, without a connection attempt failure.
|
||||
|
||||
Valence
|
||||
|
||||
A signed integer which represents the number of successful
|
||||
consecutive connection attempts when positive, and the number of
|
||||
failed consecutive connection attempts when negative.
|
||||
|
||||
When choosing addresses from the boot cache for the purpose of
|
||||
establishing outgoing connections, addresses are ranked in decreasing
|
||||
order of high uptime, with valence as the tie breaker.
|
||||
*/
|
||||
class Bootcache
|
||||
{
|
||||
@@ -41,7 +57,8 @@ public:
|
||||
{
|
||||
}
|
||||
|
||||
Endpoint (IPAddress const& address, int uptime, int valence)
|
||||
Endpoint (IPAddress const& address,
|
||||
std::chrono::seconds uptime, int valence)
|
||||
: m_address (address)
|
||||
, m_uptime (uptime)
|
||||
, m_valence (valence)
|
||||
@@ -53,7 +70,7 @@ public:
|
||||
return m_address;
|
||||
}
|
||||
|
||||
int uptime () const
|
||||
std::chrono::seconds uptime () const
|
||||
{
|
||||
return m_uptime;
|
||||
}
|
||||
@@ -65,7 +82,7 @@ public:
|
||||
|
||||
private:
|
||||
IPAddress m_address;
|
||||
int m_uptime;
|
||||
std::chrono::seconds m_uptime;
|
||||
int m_valence;
|
||||
};
|
||||
|
||||
@@ -77,33 +94,33 @@ public:
|
||||
struct Entry
|
||||
{
|
||||
Entry ()
|
||||
: cumulativeUptimeSeconds (0)
|
||||
, sessionUptimeSeconds (0)
|
||||
: cumulativeUptime (0)
|
||||
, sessionUptime (0)
|
||||
, connectionValence (0)
|
||||
, active (false)
|
||||
{
|
||||
}
|
||||
|
||||
/** Update the uptime measurement based on the time. */
|
||||
void update (DiscreteTime const now)
|
||||
void update (clock_type::time_point const& now)
|
||||
{
|
||||
// Must be active!
|
||||
consistency_check (active);
|
||||
assert (active);
|
||||
// Clock must be monotonically increasing
|
||||
consistency_check (now >= whenActive);
|
||||
assert (now >= whenActive);
|
||||
// Remove the uptime we added earlier in the
|
||||
// session and add back in the new uptime measurement.
|
||||
DiscreteTime const uptimeSeconds (now - whenActive);
|
||||
cumulativeUptimeSeconds -= sessionUptimeSeconds;
|
||||
cumulativeUptimeSeconds += uptimeSeconds;
|
||||
sessionUptimeSeconds = uptimeSeconds;
|
||||
auto const uptime (now - whenActive);
|
||||
cumulativeUptime -= sessionUptime;
|
||||
cumulativeUptime += uptime;
|
||||
sessionUptime = uptime;
|
||||
}
|
||||
|
||||
/** Our cumulative uptime with this address with no failures. */
|
||||
int cumulativeUptimeSeconds;
|
||||
std::chrono::seconds cumulativeUptime;
|
||||
|
||||
/** Amount of uptime from the current session (if any). */
|
||||
int sessionUptimeSeconds;
|
||||
std::chrono::seconds sessionUptime;
|
||||
|
||||
/** Number of consecutive connection successes or failures.
|
||||
If the number is positive, indicates the number of
|
||||
@@ -117,7 +134,7 @@ public:
|
||||
bool active;
|
||||
|
||||
/** Time when the peer became active. */
|
||||
DiscreteTime whenActive;
|
||||
clock_type::time_point whenActive;
|
||||
};
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
@@ -139,14 +156,14 @@ public:
|
||||
Entry const& lhs (lhs_iter->second);
|
||||
Entry const& rhs (rhs_iter->second);
|
||||
// Higher cumulative uptime always wins
|
||||
if (lhs.cumulativeUptimeSeconds > rhs.cumulativeUptimeSeconds)
|
||||
if (lhs.cumulativeUptime > rhs.cumulativeUptime)
|
||||
return true;
|
||||
else if (lhs.cumulativeUptimeSeconds <= rhs.cumulativeUptimeSeconds
|
||||
&& rhs.cumulativeUptimeSeconds != 0)
|
||||
else if (lhs.cumulativeUptime <= rhs.cumulativeUptime
|
||||
&& rhs.cumulativeUptime.count() != 0)
|
||||
return false;
|
||||
// At this point both uptimes will be zero
|
||||
consistency_check (lhs.cumulativeUptimeSeconds == 0 &&
|
||||
rhs.cumulativeUptimeSeconds == 0);
|
||||
consistency_check (lhs.cumulativeUptime.count() == 0 &&
|
||||
rhs.cumulativeUptime.count() == 0);
|
||||
if (lhs.connectionValence > rhs.connectionValence)
|
||||
return true;
|
||||
return false;
|
||||
@@ -160,24 +177,24 @@ public:
|
||||
typedef std::vector <Entries::iterator> SortedEntries;
|
||||
|
||||
Store& m_store;
|
||||
DiscreteClock <DiscreteTime> m_clock;
|
||||
clock_type& m_clock;
|
||||
Journal m_journal;
|
||||
Entries m_entries;
|
||||
|
||||
// Time after which we can update the database again
|
||||
DiscreteTime m_whenUpdate;
|
||||
clock_type::time_point m_whenUpdate;
|
||||
|
||||
// Set to true when a database update is needed
|
||||
bool m_needsUpdate;
|
||||
|
||||
Bootcache (
|
||||
Store& store,
|
||||
DiscreteClock <DiscreteTime> clock,
|
||||
clock_type& clock,
|
||||
Journal journal)
|
||||
: m_store (store)
|
||||
, m_clock (clock)
|
||||
, m_journal (journal)
|
||||
, m_whenUpdate (clock())
|
||||
, m_whenUpdate (m_clock.now ())
|
||||
{
|
||||
}
|
||||
|
||||
@@ -206,7 +223,7 @@ public:
|
||||
{
|
||||
++count;
|
||||
Entry& entry (result.first->second);
|
||||
entry.cumulativeUptimeSeconds = iter->cumulativeUptimeSeconds;
|
||||
entry.cumulativeUptime = iter->cumulativeUptime;
|
||||
entry.connectionValence = iter->connectionValence;
|
||||
}
|
||||
else
|
||||
@@ -254,7 +271,7 @@ public:
|
||||
for (Entries::const_iterator iter (m_entries.begin ());
|
||||
iter != m_entries.end (); ++iter)
|
||||
result.emplace_back (iter->first,
|
||||
iter->second.cumulativeUptimeSeconds,
|
||||
iter->second.cumulativeUptime,
|
||||
iter->second.connectionValence);
|
||||
return result;
|
||||
}
|
||||
@@ -295,8 +312,8 @@ public:
|
||||
// with resetting uptime to prevent the entire network
|
||||
// from settling on just a handful of addresses.
|
||||
//
|
||||
entry.cumulativeUptimeSeconds = 0;
|
||||
entry.sessionUptimeSeconds = 0 ;
|
||||
entry.cumulativeUptime = std::chrono::seconds (0);
|
||||
entry.sessionUptime = std::chrono::seconds (0);
|
||||
// Increment the number of consecutive failures.
|
||||
if (entry.connectionValence > 0)
|
||||
entry.connectionValence = 0;
|
||||
@@ -320,7 +337,7 @@ public:
|
||||
// Can't already be active!
|
||||
consistency_check (! entry.active);
|
||||
// Reset session uptime
|
||||
entry.sessionUptimeSeconds = 0;
|
||||
entry.sessionUptime = std::chrono::seconds (0);
|
||||
// Count this as a connection success
|
||||
if (entry.connectionValence < 0)
|
||||
entry.connectionValence = 0;
|
||||
@@ -329,7 +346,7 @@ public:
|
||||
if (action == doActivate)
|
||||
{
|
||||
entry.active = true;
|
||||
entry.whenActive = m_clock();
|
||||
entry.whenActive = m_clock.now();
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -359,17 +376,20 @@ public:
|
||||
// Must exist!
|
||||
consistency_check (! result.second);
|
||||
Entry& entry (result.first->second);
|
||||
entry.update (m_clock());
|
||||
entry.update (m_clock.now());
|
||||
flagForUpdate();
|
||||
}
|
||||
|
||||
template <typename Seconds>
|
||||
static std::string uptime_phrase (Seconds seconds)
|
||||
template <class Rep, class Period>
|
||||
static std::string uptime_phrase (
|
||||
std::chrono::duration <Rep, Period> const& elapsed)
|
||||
{
|
||||
if (seconds > 0)
|
||||
return std::string (" with ") +
|
||||
RelativeTime (seconds).to_string() +
|
||||
" uptime";
|
||||
if (elapsed.count() > 0)
|
||||
{
|
||||
std::stringstream ss;
|
||||
ss << " with " << elapsed << " uptime";
|
||||
return ss.str();
|
||||
}
|
||||
return std::string ();
|
||||
}
|
||||
/** Called when an active outbound connection closes. */
|
||||
@@ -383,9 +403,9 @@ public:
|
||||
consistency_check (entry.active);
|
||||
if (m_journal.trace) m_journal.trace << leftw (18) <<
|
||||
"Bootcache close " << address <<
|
||||
uptime_phrase (entry.cumulativeUptimeSeconds);
|
||||
entry.update (m_clock());
|
||||
entry.sessionUptimeSeconds = 0;
|
||||
uptime_phrase (entry.cumulativeUptime);
|
||||
entry.update (m_clock.now());
|
||||
entry.sessionUptime = std::chrono::seconds (0);
|
||||
entry.active = false;
|
||||
flagForUpdate();
|
||||
}
|
||||
@@ -420,7 +440,7 @@ public:
|
||||
{
|
||||
ss << std::endl <<
|
||||
(*iter)->first << ", " <<
|
||||
RelativeTime ((*iter)->second.cumulativeUptimeSeconds) << ", "
|
||||
(*iter)->second.cumulativeUptime << ", "
|
||||
<< valenceString ((*iter)->second.connectionValence);
|
||||
if ((*iter)->second.active)
|
||||
ss <<
|
||||
@@ -429,9 +449,6 @@ public:
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
//
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
private:
|
||||
// Returns a vector of entry iterators sorted by descending score
|
||||
std::vector <Entries::const_iterator> csort () const
|
||||
@@ -480,7 +497,7 @@ private:
|
||||
continue;
|
||||
if (m_journal.trace) m_journal.trace << leftw (18) <<
|
||||
"Bootcache pruned" << (*iter)->first <<
|
||||
uptime_phrase (entry.cumulativeUptimeSeconds) <<
|
||||
uptime_phrase (entry.cumulativeUptime) <<
|
||||
" and valence " << entry.connectionValence;
|
||||
m_entries.erase (*iter);
|
||||
--count;
|
||||
@@ -504,20 +521,20 @@ private:
|
||||
{
|
||||
Store::SavedBootstrapAddress entry;
|
||||
entry.address = iter->first;
|
||||
entry.cumulativeUptimeSeconds = iter->second.cumulativeUptimeSeconds;
|
||||
entry.cumulativeUptime = iter->second.cumulativeUptime;
|
||||
entry.connectionValence = iter->second.connectionValence;
|
||||
list.push_back (entry);
|
||||
}
|
||||
m_store.updateBootstrapCache (list);
|
||||
// Reset the flag and cooldown timer
|
||||
m_needsUpdate = false;
|
||||
m_whenUpdate = m_clock() + Tuning::bootcacheCooldownSeconds;
|
||||
m_whenUpdate = m_clock.now() + Tuning::bootcacheCooldownTime;
|
||||
}
|
||||
|
||||
// Checks the clock and calls update if we are off the cooldown.
|
||||
void checkUpdate ()
|
||||
{
|
||||
if (m_needsUpdate && m_whenUpdate < m_clock())
|
||||
if (m_needsUpdate && m_whenUpdate < m_clock.now())
|
||||
update ();
|
||||
}
|
||||
|
||||
|
||||
366
src/ripple/peerfinder/impl/Counts.h
Normal file
366
src/ripple/peerfinder/impl/Counts.h
Normal file
@@ -0,0 +1,366 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
Copyright (c) 2012, 2013 Ripple Labs Inc.
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef RIPPLE_PEERFINDER_COUNTS_H_INCLUDED
|
||||
#define RIPPLE_PEERFINDER_COUNTS_H_INCLUDED
|
||||
|
||||
#include "../api/Slot.h"
|
||||
|
||||
namespace ripple {
|
||||
namespace PeerFinder {
|
||||
|
||||
/** Manages the count of available connections for the various slots. */
|
||||
class Counts
|
||||
{
|
||||
public:
|
||||
explicit Counts (clock_type& clock)
|
||||
: m_clock (clock)
|
||||
, m_attempts (0)
|
||||
, m_active (0)
|
||||
, m_in_max (0)
|
||||
, m_in_active (0)
|
||||
, m_out_max (0)
|
||||
, m_out_active (0)
|
||||
, m_fixed (0)
|
||||
, m_fixed_active (0)
|
||||
, m_cluster (0)
|
||||
|
||||
, m_acceptCount (0)
|
||||
, m_closingCount (0)
|
||||
{
|
||||
#if 0
|
||||
std::random_device rd;
|
||||
std::mt19937 gen (rd());
|
||||
m_roundingThreshold =
|
||||
std::generate_canonical <double, 10> (gen);
|
||||
#else
|
||||
m_roundingThreshold = Random::getSystemRandom().nextDouble();
|
||||
#endif
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
/** Adds the slot state and properties to the slot counts. */
|
||||
void add (Slot const& s)
|
||||
{
|
||||
adjust (s, 1);
|
||||
}
|
||||
|
||||
/** Removes the slot state and properties from the slot counts. */
|
||||
void remove (Slot const& s)
|
||||
{
|
||||
adjust (s, -1);
|
||||
}
|
||||
|
||||
/** Returns `true` if the slot can become active. */
|
||||
bool can_activate (Slot const& s) const
|
||||
{
|
||||
// Must be handshaked and in the right state
|
||||
assert (s.state() == Slot::connected || s.state() == Slot::accept);
|
||||
|
||||
if (s.fixed () || s.cluster ())
|
||||
return true;
|
||||
|
||||
if (s.inbound ())
|
||||
return m_in_active < m_in_max;
|
||||
|
||||
return m_out_active < m_out_max;
|
||||
}
|
||||
|
||||
/** Returns the number of attempts needed to bring us to the max. */
|
||||
std::size_t attempts_needed () const
|
||||
{
|
||||
if (m_attempts >= Tuning::maxConnectAttempts)
|
||||
return 0;
|
||||
return Tuning::maxConnectAttempts - m_attempts;
|
||||
}
|
||||
|
||||
/** Returns the number of outbound connection attempts. */
|
||||
std::size_t attempts () const
|
||||
{
|
||||
return m_attempts;
|
||||
};
|
||||
|
||||
/** Returns the total number of outbound slots. */
|
||||
int out_max () const
|
||||
{
|
||||
return m_out_max;
|
||||
}
|
||||
|
||||
/** Returns the number of outbound peers assigned an open slot.
|
||||
Fixed peers do not count towards outbound slots used.
|
||||
*/
|
||||
int out_active () const
|
||||
{
|
||||
return m_out_active;
|
||||
}
|
||||
|
||||
/** Returns the number of fixed connections. */
|
||||
std::size_t fixed () const
|
||||
{
|
||||
return m_fixed;
|
||||
}
|
||||
|
||||
/** Returns the number of active fixed connections. */
|
||||
std::size_t fixed_active () const
|
||||
{
|
||||
return m_fixed_active;
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
/** Called when the config is set or changed. */
|
||||
void onConfig (Config const& config)
|
||||
{
|
||||
// Calculate the number of outbound peers we want. If we dont want or can't
|
||||
// accept incoming, this will simply be equal to maxPeers. Otherwise
|
||||
// we calculate a fractional amount based on percentages and pseudo-randomly
|
||||
// round up or down.
|
||||
//
|
||||
if (config.wantIncoming)
|
||||
{
|
||||
// Round outPeers upwards using a Bernoulli distribution
|
||||
m_out_max = std::floor (config.outPeers);
|
||||
if (m_roundingThreshold < (config.outPeers - m_out_max))
|
||||
++m_out_max;
|
||||
}
|
||||
else
|
||||
{
|
||||
m_out_max = config.maxPeers;
|
||||
}
|
||||
|
||||
// Calculate the largest number of inbound connections we could take.
|
||||
if (config.maxPeers >= m_out_max)
|
||||
m_in_max = config.maxPeers - m_out_max;
|
||||
else
|
||||
m_in_max = 0;
|
||||
}
|
||||
|
||||
/** Returns the number of accepted connections that haven't handshaked. */
|
||||
int acceptCount() const
|
||||
{
|
||||
return m_acceptCount;
|
||||
}
|
||||
|
||||
/** Returns the number of connection attempts currently active. */
|
||||
int connectCount() const
|
||||
{
|
||||
return m_attempts;
|
||||
}
|
||||
|
||||
/** Returns the number of connections that are gracefully closing. */
|
||||
int closingCount () const
|
||||
{
|
||||
return m_closingCount;
|
||||
}
|
||||
|
||||
/** Returns the total number of inbound slots. */
|
||||
int inboundSlots () const
|
||||
{
|
||||
return m_in_max;
|
||||
}
|
||||
|
||||
/** Returns the number of inbound peers assigned an open slot. */
|
||||
int inboundActive () const
|
||||
{
|
||||
return m_in_active;
|
||||
}
|
||||
|
||||
/** Returns the total number of active peers excluding fixed peers. */
|
||||
int totalActive () const
|
||||
{
|
||||
return m_in_active + m_out_active;
|
||||
}
|
||||
|
||||
/** Returns the number of unused inbound slots.
|
||||
Fixed peers do not deduct from inbound slots or count towards totals.
|
||||
*/
|
||||
int inboundSlotsFree () const
|
||||
{
|
||||
if (m_in_active < m_in_max)
|
||||
return m_in_max - m_in_active;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Returns the number of unused outbound slots.
|
||||
Fixed peers do not deduct from outbound slots or count towards totals.
|
||||
*/
|
||||
int outboundSlotsFree () const
|
||||
{
|
||||
if (m_out_active < m_out_max)
|
||||
return m_out_max - m_out_active;
|
||||
return 0;
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
/** Returns the number of new connection attempts we should make. */
|
||||
int additionalAttemptsNeeded () const
|
||||
{
|
||||
// Don't go over the maximum concurrent attempt limit
|
||||
if (m_attempts >= Tuning::maxConnectAttempts)
|
||||
return 0;
|
||||
int needed (outboundSlotsFree ());
|
||||
// This is the most we could attempt right now
|
||||
int const available (
|
||||
Tuning::maxConnectAttempts - m_attempts);
|
||||
//return std::min (needed, available);
|
||||
return available;
|
||||
}
|
||||
|
||||
/** Returns true if the slot logic considers us "connected" to the network. */
|
||||
bool isConnectedToNetwork () const
|
||||
{
|
||||
// We will consider ourselves connected if we have reached
|
||||
// the number of outgoing connections desired, or if connect
|
||||
// automatically is false.
|
||||
//
|
||||
// Fixed peers do not count towards the active outgoing total.
|
||||
|
||||
if (m_out_max > 0)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/** Output statistics. */
|
||||
void onWrite (PropertyStream::Map& map)
|
||||
{
|
||||
map ["accept"] = acceptCount ();
|
||||
map ["connect"] = connectCount ();
|
||||
map ["close"] = closingCount ();
|
||||
map ["in"] << m_in_active << "/" << m_in_max;
|
||||
map ["out"] << m_out_active << "/" << m_out_max;
|
||||
map ["fixed"] = m_fixed_active;
|
||||
map ["cluster"] = m_cluster;
|
||||
map ["total"] = m_active;
|
||||
}
|
||||
|
||||
/** Records the state for diagnostics. */
|
||||
std::string state_string () const
|
||||
{
|
||||
std::stringstream ss;
|
||||
ss <<
|
||||
m_out_active << "/" << m_out_max << " out, " <<
|
||||
m_in_active << "/" << m_in_max << " in, " <<
|
||||
connectCount() << " connecting, " <<
|
||||
closingCount() << " closing"
|
||||
;
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
private:
|
||||
// Adjusts counts based on the specified slot, in the direction indicated.
|
||||
void adjust (Slot const& s, int const n)
|
||||
{
|
||||
if (s.fixed ())
|
||||
m_fixed += n;
|
||||
|
||||
if (s.cluster ())
|
||||
m_cluster += n;
|
||||
|
||||
switch (s.state ())
|
||||
{
|
||||
case Slot::accept:
|
||||
assert (s.inbound ());
|
||||
m_acceptCount += n;
|
||||
break;
|
||||
|
||||
case Slot::connect:
|
||||
case Slot::connected:
|
||||
assert (! s.inbound ());
|
||||
m_attempts += n;
|
||||
break;
|
||||
|
||||
case Slot::active:
|
||||
if (s.fixed ())
|
||||
m_fixed_active += n;
|
||||
if (! s.fixed () && ! s.cluster ())
|
||||
{
|
||||
if (s.inbound ())
|
||||
m_in_active += n;
|
||||
else
|
||||
m_out_active += n;
|
||||
}
|
||||
m_active += n;
|
||||
break;
|
||||
|
||||
case Slot::closing:
|
||||
m_closingCount += n;
|
||||
break;
|
||||
|
||||
default:
|
||||
assert (false);
|
||||
break;
|
||||
};
|
||||
}
|
||||
|
||||
private:
|
||||
clock_type& m_clock;
|
||||
|
||||
/** Outbound connection attempts. */
|
||||
int m_attempts;
|
||||
|
||||
/** Active connections, including fixed and cluster. */
|
||||
std::size_t m_active;
|
||||
|
||||
/** Total number of inbound slots. */
|
||||
std::size_t m_in_max;
|
||||
|
||||
/** Number of inbound slots assigned to active peers. */
|
||||
std::size_t m_in_active;
|
||||
|
||||
/** Maximum desired outbound slots. */
|
||||
std::size_t m_out_max;
|
||||
|
||||
/** Active outbound slots. */
|
||||
std::size_t m_out_active;
|
||||
|
||||
/** Fixed connections. */
|
||||
std::size_t m_fixed;
|
||||
|
||||
/** Active fixed connections. */
|
||||
std::size_t m_fixed_active;
|
||||
|
||||
/** Cluster connections. */
|
||||
std::size_t m_cluster;
|
||||
|
||||
|
||||
|
||||
|
||||
// Number of inbound connections that are
|
||||
// not active or gracefully closing.
|
||||
int m_acceptCount;
|
||||
|
||||
// Number of connections that are gracefully closing.
|
||||
int m_closingCount;
|
||||
|
||||
/** Fractional threshold below which we round down.
|
||||
This is used to round the value of Config::outPeers up or down in
|
||||
such a way that the network-wide average number of outgoing
|
||||
connections approximates the recommended, fractional value.
|
||||
*/
|
||||
double m_roundingThreshold;
|
||||
};
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -17,38 +17,51 @@
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef RIPPLE_PEERFINDER_LOGICTYPE_H_INCLUDED
|
||||
#define RIPPLE_PEERFINDER_LOGICTYPE_H_INCLUDED
|
||||
#ifndef RIPPLE_PEERFINDER_FIXED_H_INCLUDED
|
||||
#define RIPPLE_PEERFINDER_FIXED_H_INCLUDED
|
||||
|
||||
#include "Tuning.h"
|
||||
|
||||
namespace ripple {
|
||||
namespace PeerFinder {
|
||||
|
||||
template <class DiscreteClockSourceType>
|
||||
class LogicType
|
||||
: private BaseFromMember <DiscreteClockSourceType>
|
||||
, public Logic
|
||||
/** Metadata for a Fixed slot. */
|
||||
class Fixed
|
||||
{
|
||||
public:
|
||||
typedef typename DiscreteClockSourceType::DiscreteClockType DiscreteClockType;
|
||||
|
||||
LogicType (
|
||||
Callback& callback,
|
||||
Store& store,
|
||||
Checker& checker,
|
||||
Journal journal)
|
||||
: Logic (
|
||||
BaseFromMember <DiscreteClockSourceType>::member(),
|
||||
callback,
|
||||
store,
|
||||
checker,
|
||||
journal)
|
||||
explicit Fixed (clock_type& clock)
|
||||
: m_when (clock.now ())
|
||||
, m_failures (0)
|
||||
{
|
||||
}
|
||||
|
||||
DiscreteClockSourceType& get_clock()
|
||||
Fixed (Fixed const&) = default;
|
||||
|
||||
/** Returns the time after which we shoud allow a connection attempt. */
|
||||
clock_type::time_point const& when () const
|
||||
{
|
||||
return BaseFromMember <DiscreteClockSourceType>::member();
|
||||
return m_when;
|
||||
}
|
||||
|
||||
/** Updates metadata to reflect a failed connection. */
|
||||
void failure (clock_type::time_point const& now)
|
||||
{
|
||||
m_failures = std::min (m_failures + 1,
|
||||
Tuning::connectionBackoff.size() - 1);
|
||||
m_when = now + std::chrono::minutes (
|
||||
Tuning::connectionBackoff [m_failures]);
|
||||
}
|
||||
|
||||
/** Updates metadata to reflect a successful connection. */
|
||||
void success (clock_type::time_point const& now)
|
||||
{
|
||||
m_failures = 0;
|
||||
m_when = now;
|
||||
}
|
||||
|
||||
private:
|
||||
clock_type::time_point m_when;
|
||||
std::size_t m_failures;
|
||||
};
|
||||
|
||||
}
|
||||
@@ -1,79 +0,0 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
Copyright (c) 2012, 2013 Ripple Labs Inc.
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef RIPPLE_PEERFINDER_FIXEDPEER_H_INCLUDED
|
||||
#define RIPPLE_PEERFINDER_FIXEDPEER_H_INCLUDED
|
||||
|
||||
namespace ripple {
|
||||
namespace PeerFinder {
|
||||
|
||||
/** Stores information about a fixed peer.
|
||||
A fixed peer is defined in the config file and can be specified using
|
||||
either an IP address or a hostname (which may resolve to zero or more
|
||||
addresses).
|
||||
A fixed peer which has multiple IP addresses is considered connected
|
||||
if there is a connection to any one of its addresses.
|
||||
*/
|
||||
class FixedPeer
|
||||
{
|
||||
public:
|
||||
/* The config name */
|
||||
std::string const m_name;
|
||||
|
||||
/* The corresponding IP address(es) */
|
||||
IPAddresses m_addresses;
|
||||
|
||||
FixedPeer (std::string const& name,
|
||||
IPAddresses const& addresses)
|
||||
: m_name (name)
|
||||
, m_addresses (addresses)
|
||||
{
|
||||
bassert (!m_addresses.empty ());
|
||||
|
||||
// NIKB TODO add support for multiple IPs
|
||||
m_addresses.resize (1);
|
||||
}
|
||||
|
||||
// NIKB TODO support peers which resolve to more than a single address
|
||||
IPAddress getAddress () const
|
||||
{
|
||||
if (m_addresses.size ())
|
||||
return m_addresses.at(0);
|
||||
|
||||
return IPAddress ();
|
||||
}
|
||||
|
||||
template <typename Comparator>
|
||||
bool hasAddress (IPAddress const& address, Comparator compare) const
|
||||
{
|
||||
for (IPAddresses::const_iterator iter = m_addresses.cbegin();
|
||||
iter != m_addresses.cend(); ++iter)
|
||||
{
|
||||
if (compare (*iter, address))
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -23,7 +23,7 @@ namespace PeerFinder {
|
||||
class LivecacheTests : public UnitTest
|
||||
{
|
||||
public:
|
||||
ManualClock m_clock_source;
|
||||
manual_clock <clock_type::duration> m_clock;
|
||||
|
||||
// Add the address as an endpoint
|
||||
void add (uint32 index, uint16 port, Livecache& c)
|
||||
@@ -39,7 +39,7 @@ public:
|
||||
{
|
||||
beginTestCase ("fetch");
|
||||
|
||||
Livecache c (m_clock_source, Journal());
|
||||
Livecache c (m_clock, Journal());
|
||||
|
||||
add (1, 1, c);
|
||||
add (2, 1, c);
|
||||
|
||||
@@ -20,6 +20,8 @@
|
||||
#ifndef RIPPLE_PEERFINDER_LIVECACHE_H_INCLUDED
|
||||
#define RIPPLE_PEERFINDER_LIVECACHE_H_INCLUDED
|
||||
|
||||
#include <unordered_map>
|
||||
|
||||
namespace ripple {
|
||||
namespace PeerFinder {
|
||||
|
||||
@@ -44,20 +46,21 @@ public:
|
||||
|
||||
struct Entry : public EntryList::Node
|
||||
{
|
||||
Entry (Endpoint const& endpoint_, DiscreteTime whenExpires_)
|
||||
Entry (Endpoint const& endpoint_,
|
||||
clock_type::time_point const& whenExpires_)
|
||||
: endpoint (endpoint_)
|
||||
, whenExpires (whenExpires_)
|
||||
{
|
||||
}
|
||||
|
||||
Endpoint endpoint;
|
||||
DiscreteTime whenExpires;
|
||||
clock_type::time_point whenExpires;
|
||||
};
|
||||
|
||||
typedef std::set <Endpoint, LessEndpoints> SortedTable;
|
||||
typedef boost::unordered_map <IPAddress, Entry> AddressTable;
|
||||
typedef std::unordered_map <IPAddress, Entry> AddressTable;
|
||||
|
||||
DiscreteClock <DiscreteTime> m_clock;
|
||||
clock_type& m_clock;
|
||||
Journal m_journal;
|
||||
AddressTable m_byAddress;
|
||||
SortedTable m_bySorted;
|
||||
@@ -68,8 +71,8 @@ public:
|
||||
|
||||
public:
|
||||
/** Create the cache. */
|
||||
explicit Livecache (
|
||||
DiscreteClock <DiscreteTime> clock,
|
||||
Livecache (
|
||||
clock_type& clock,
|
||||
Journal journal)
|
||||
: m_clock (clock)
|
||||
, m_journal (journal)
|
||||
@@ -91,7 +94,7 @@ public:
|
||||
/** Erase entries whose time has expired. */
|
||||
void sweep ()
|
||||
{
|
||||
DiscreteTime const now (m_clock());
|
||||
auto const now (m_clock.now ());
|
||||
AddressTable::size_type count (0);
|
||||
for (EntryList::iterator iter (m_list.begin());
|
||||
iter != m_list.end();)
|
||||
@@ -124,13 +127,12 @@ public:
|
||||
{
|
||||
// Caller is responsible for validation
|
||||
check_precondition (endpoint.hops <= Tuning::maxHops);
|
||||
DiscreteTime const now (m_clock());
|
||||
DiscreteTime const whenExpires (
|
||||
now + Tuning::liveCacheSecondsToLive);
|
||||
auto now (m_clock.now ());
|
||||
auto const whenExpires (now + Tuning::liveCacheSecondsToLive);
|
||||
std::pair <AddressTable::iterator, bool> result (
|
||||
m_byAddress.emplace (boost::unordered::piecewise_construct,
|
||||
boost::make_tuple (endpoint.address),
|
||||
boost::make_tuple (endpoint, whenExpires)));
|
||||
m_byAddress.emplace (std::piecewise_construct,
|
||||
std::make_tuple (endpoint.address),
|
||||
std::make_tuple (endpoint, whenExpires)));
|
||||
Entry& entry (result.first->second);
|
||||
// Drop duplicates at higher hops
|
||||
if (! result.second && (endpoint.hops > entry.endpoint.hops))
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -30,11 +30,12 @@ class ManagerImp
|
||||
public:
|
||||
ServiceQueue m_queue;
|
||||
SiteFiles::Manager& m_siteFiles;
|
||||
clock_type& m_clock;
|
||||
Journal m_journal;
|
||||
StoreSqdb m_store;
|
||||
SerializedContext m_context;
|
||||
CheckerAdapter m_checker;
|
||||
LogicType <SimpleMonotonicClock> m_logic;
|
||||
Logic m_logic;
|
||||
DeadlineTimer m_connectTimer;
|
||||
DeadlineTimer m_messageTimer;
|
||||
DeadlineTimer m_cacheTimer;
|
||||
@@ -45,14 +46,16 @@ public:
|
||||
Stoppable& stoppable,
|
||||
SiteFiles::Manager& siteFiles,
|
||||
Callback& callback,
|
||||
clock_type& clock,
|
||||
Journal journal)
|
||||
: Manager (stoppable)
|
||||
, Thread ("PeerFinder")
|
||||
, m_siteFiles (siteFiles)
|
||||
, m_clock (clock)
|
||||
, m_journal (journal)
|
||||
, m_store (journal)
|
||||
, m_checker (m_context, m_queue)
|
||||
, m_logic (callback, m_store, m_checker, journal)
|
||||
, m_logic (clock, callback, m_store, m_checker, journal)
|
||||
, m_connectTimer (this)
|
||||
, m_messageTimer (this)
|
||||
, m_cacheTimer (this)
|
||||
@@ -103,72 +106,48 @@ public:
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
void onPeerAccept (IPAddress const& local_address,
|
||||
IPAddress const& remote_address)
|
||||
Slot::ptr new_inbound_slot (
|
||||
IP::Endpoint const& local_endpoint,
|
||||
IP::Endpoint const& remote_endpoint)
|
||||
{
|
||||
m_queue.dispatch (
|
||||
m_context.wrap (
|
||||
bind (&Logic::onPeerAccept, &m_logic,
|
||||
local_address, remote_address)));
|
||||
return m_logic.new_inbound_slot (local_endpoint, remote_endpoint);
|
||||
}
|
||||
|
||||
void onPeerConnect (IPAddress const& address)
|
||||
Slot::ptr new_outbound_slot (IP::Endpoint const& remote_endpoint)
|
||||
{
|
||||
m_queue.dispatch (
|
||||
m_context.wrap (
|
||||
bind (&Logic::onPeerConnect, &m_logic,
|
||||
address)));
|
||||
return m_logic.new_outbound_slot (remote_endpoint);
|
||||
}
|
||||
|
||||
void onPeerConnected (IPAddress const& local_address,
|
||||
IPAddress const& remote_address)
|
||||
void on_connected (Slot::ptr const& slot,
|
||||
IP::Endpoint const& local_endpoint)
|
||||
{
|
||||
m_queue.dispatch (
|
||||
m_context.wrap (
|
||||
bind (&Logic::onPeerConnected, &m_logic,
|
||||
local_address, remote_address)));
|
||||
SlotImp::ptr impl (std::dynamic_pointer_cast <SlotImp> (slot));
|
||||
m_logic.on_connected (impl, local_endpoint);
|
||||
}
|
||||
|
||||
void onPeerAddressChanged (
|
||||
IPAddress const& currentAddress, IPAddress const& newAddress)
|
||||
void on_handshake (Slot::ptr const& slot,
|
||||
RipplePublicKey const& key, bool cluster)
|
||||
{
|
||||
m_queue.dispatch (
|
||||
m_context.wrap (
|
||||
bind (&Logic::onPeerAddressChanged, &m_logic,
|
||||
currentAddress, newAddress)));
|
||||
SlotImp::ptr impl (std::dynamic_pointer_cast <SlotImp> (slot));
|
||||
m_logic.on_handshake (impl, key, cluster);
|
||||
}
|
||||
|
||||
void onPeerHandshake (IPAddress const& address, PeerID const& id,
|
||||
bool cluster)
|
||||
{
|
||||
m_queue.dispatch (
|
||||
m_context.wrap (
|
||||
bind (&Logic::onPeerHandshake, &m_logic,
|
||||
address, id, cluster)));
|
||||
}
|
||||
|
||||
void onPeerClosed (IPAddress const& address)
|
||||
{
|
||||
m_queue.dispatch (
|
||||
m_context.wrap (
|
||||
bind (&Logic::onPeerClosed, &m_logic,
|
||||
address)));
|
||||
}
|
||||
|
||||
void onPeerEndpoints (IPAddress const& address,
|
||||
void on_endpoints (Slot::ptr const& slot,
|
||||
Endpoints const& endpoints)
|
||||
{
|
||||
m_queue.dispatch (
|
||||
beast::bind (&Logic::onPeerEndpoints, &m_logic,
|
||||
address, endpoints));
|
||||
SlotImp::ptr impl (std::dynamic_pointer_cast <SlotImp> (slot));
|
||||
m_logic.on_endpoints (impl, endpoints);
|
||||
}
|
||||
|
||||
void onLegacyEndpoints (IPAddresses const& addresses)
|
||||
void on_legacy_endpoints (IPAddresses const& addresses)
|
||||
{
|
||||
m_queue.dispatch (
|
||||
m_context.wrap (
|
||||
beast::bind (&Logic::onLegacyEndpoints, &m_logic,
|
||||
addresses)));
|
||||
m_logic.on_legacy_endpoints (addresses);
|
||||
}
|
||||
|
||||
void on_closed (Slot::ptr const& slot)
|
||||
{
|
||||
SlotImp::ptr impl (std::dynamic_pointer_cast <SlotImp> (slot));
|
||||
m_logic.on_closed (impl);
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
@@ -281,7 +260,7 @@ public:
|
||||
{
|
||||
m_queue.dispatch (
|
||||
m_context.wrap (
|
||||
bind (&Logic::sendEndpoints, &m_logic)));
|
||||
bind (&Logic::broadcast, &m_logic)));
|
||||
|
||||
m_messageTimer.setExpiration (Tuning::secondsPerMessage);
|
||||
}
|
||||
@@ -359,9 +338,10 @@ Manager* Manager::New (
|
||||
Stoppable& parent,
|
||||
SiteFiles::Manager& siteFiles,
|
||||
Callback& callback,
|
||||
clock_type& clock,
|
||||
Journal journal)
|
||||
{
|
||||
return new ManagerImp (parent, siteFiles, callback, journal);
|
||||
return new ManagerImp (parent, siteFiles, callback, clock, journal);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -23,9 +23,6 @@
|
||||
namespace ripple {
|
||||
namespace PeerFinder {
|
||||
|
||||
/** Time in seconds since some baseline event in the past. */
|
||||
typedef int DiscreteTime;
|
||||
|
||||
/** Indicates the action the logic will take after a handshake. */
|
||||
enum HandshakeAction
|
||||
{
|
||||
|
||||
@@ -1,185 +0,0 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
Copyright (c) 2012, 2013 Ripple Labs Inc.
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#if 0
|
||||
namespace ripple {
|
||||
namespace PeerFinder {
|
||||
|
||||
class ResolverImp
|
||||
: public Resolver
|
||||
, private Thread
|
||||
, private LeakChecked <ResolverImp>
|
||||
{
|
||||
private:
|
||||
class Request;
|
||||
|
||||
struct State
|
||||
{
|
||||
List <Request> list;
|
||||
};
|
||||
|
||||
typedef SharedData <State> SharedState;
|
||||
|
||||
SharedState m_state;
|
||||
boost::asio::io_service m_io_service;
|
||||
boost::optional <boost::asio::io_service::work> m_work;
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
static boost::asio::ip::tcp::endpoint fromIPAddress (
|
||||
IPAddress const& ipEndpoint)
|
||||
{
|
||||
if (ipEndpoint.is_v4 ())
|
||||
{
|
||||
return boost::asio::ip::tcp::endpoint (
|
||||
boost::asio::ip::address_v4 (
|
||||
ipEndpoint.to_v4().value),
|
||||
ipEndpoint.port ());
|
||||
}
|
||||
bassertfalse;
|
||||
return boost::asio::ip::tcp::endpoint ();
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
class Request
|
||||
: public SharedObject
|
||||
, public List <Request>::Node
|
||||
, private LeakChecked <Request>
|
||||
{
|
||||
public:
|
||||
typedef SharedPtr <Request> Ptr;
|
||||
typedef boost::asio::ip::tcp Protocol;
|
||||
typedef boost::system::error_code error_code;
|
||||
typedef Protocol::socket socket_type;
|
||||
typedef Protocol::endpoint endpoint_type;
|
||||
|
||||
ResolverImp& m_owner;
|
||||
boost::asio::io_service& m_io_service;
|
||||
IPAddress m_address;
|
||||
AbstractHandler <void (Result)> m_handler;
|
||||
socket_type m_socket;
|
||||
boost::system::error_code m_error;
|
||||
bool m_canAccept;
|
||||
|
||||
Request (ResolverImp& owner, boost::asio::io_service& io_service,
|
||||
IPAddress const& address, AbstractHandler <void (Result)> handler)
|
||||
: m_owner (owner)
|
||||
, m_io_service (io_service)
|
||||
, m_address (address)
|
||||
, m_handler (handler)
|
||||
, m_socket (m_io_service)
|
||||
, m_canAccept (false)
|
||||
{
|
||||
m_owner.add (*this);
|
||||
|
||||
m_socket.async_connect (fromIPAddress (m_address),
|
||||
wrapHandler (boost::bind (&Request::handle_connect, Ptr(this),
|
||||
boost::asio::placeholders::error), m_handler));
|
||||
}
|
||||
|
||||
~Request ()
|
||||
{
|
||||
Result result;
|
||||
result.address = m_address;
|
||||
result.error = m_error;
|
||||
m_io_service.wrap (m_handler) (result);
|
||||
|
||||
m_owner.remove (*this);
|
||||
}
|
||||
|
||||
void cancel ()
|
||||
{
|
||||
m_socket.cancel();
|
||||
}
|
||||
|
||||
void handle_connect (boost::system::error_code ec)
|
||||
{
|
||||
m_error = ec;
|
||||
if (ec)
|
||||
return;
|
||||
|
||||
m_canAccept = true;
|
||||
}
|
||||
};
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
void add (Request& request)
|
||||
{
|
||||
SharedState::Access state (m_state);
|
||||
state->list.push_back (request);
|
||||
}
|
||||
|
||||
void remove (Request& request)
|
||||
{
|
||||
SharedState::Access state (m_state);
|
||||
state->list.erase (state->list.iterator_to (request));
|
||||
}
|
||||
|
||||
void run ()
|
||||
{
|
||||
m_io_service.run ();
|
||||
}
|
||||
|
||||
public:
|
||||
ResolverImp ()
|
||||
: Thread ("PeerFinder::Resolver")
|
||||
, m_work (boost::in_place (boost::ref (m_io_service)))
|
||||
{
|
||||
startThread ();
|
||||
}
|
||||
|
||||
~ResolverImp ()
|
||||
{
|
||||
// cancel pending i/o
|
||||
cancel();
|
||||
|
||||
// destroy the io_service::work object
|
||||
m_work = boost::none;
|
||||
|
||||
// signal and wait for the thread to exit gracefully
|
||||
stopThread ();
|
||||
}
|
||||
|
||||
void cancel ()
|
||||
{
|
||||
SharedState::Access state (m_state);
|
||||
for (List <Request>::iterator iter (state->list.begin());
|
||||
iter != state->list.end(); ++iter)
|
||||
iter->cancel();
|
||||
}
|
||||
|
||||
void async_test (IPAddress const& endpoint,
|
||||
AbstractHandler <void (Result)> handler)
|
||||
{
|
||||
new Request (*this, m_io_service, endpoint, handler);
|
||||
}
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
Resolver* Resolver::New ()
|
||||
{
|
||||
return new ResolverImp;
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
#endif
|
||||
@@ -1,88 +0,0 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
Copyright (c) 2012, 2013 Ripple Labs Inc.
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef RIPPLE_PEERFINDER_RESOLVER_H_INCLUDED
|
||||
#define RIPPLE_PEERFINDER_RESOLVER_H_INCLUDED
|
||||
|
||||
namespace ripple {
|
||||
namespace PeerFinder {
|
||||
|
||||
/** Performs asynchronous domain name resolution. */
|
||||
class Resolver
|
||||
{
|
||||
public:
|
||||
/** Create the service.
|
||||
This will automatically start the associated thread and io_service.
|
||||
*/
|
||||
static Resolver* New ();
|
||||
|
||||
/** Destroy the service.
|
||||
Any pending I/O operations will be canceled. This call blocks until
|
||||
all pending operations complete (either with success or with
|
||||
operation_aborted) and the associated thread and io_service have
|
||||
no more work remaining.
|
||||
*/
|
||||
virtual ~Resolver () { }
|
||||
|
||||
/** Cancel pending I/O.
|
||||
This issues cancel orders for all pending I/O operations and then
|
||||
returns immediately. Handlers will receive operation_aborted errors,
|
||||
or if they were already queued they will complete normally.
|
||||
*/
|
||||
virtual void cancel () = 0;
|
||||
|
||||
struct Result
|
||||
{
|
||||
Result ()
|
||||
{ }
|
||||
|
||||
/** The original name string */
|
||||
std::string name;
|
||||
|
||||
/** The error code from the operation. */
|
||||
boost::system::error_code error;
|
||||
|
||||
/** The resolved address.
|
||||
Only defined if there is no error.
|
||||
If the original name string contains a port specification,
|
||||
it will be set in the resolved IPAddress.
|
||||
*/
|
||||
IPAddress address;
|
||||
};
|
||||
|
||||
/** Performs an async resolution on the specified name.
|
||||
The port information, if present, will be passed through.
|
||||
*/
|
||||
template <typename Handler>
|
||||
void async_resolve (std::string const& name,
|
||||
BEAST_MOVE_ARG(Handler) handler)
|
||||
{
|
||||
async_resolve (name,
|
||||
AbstractHandler <void (Result)> (
|
||||
BEAST_MOVE_CAST(Handler)(handler)));
|
||||
}
|
||||
|
||||
virtual void async_resolve (std::string const& name,
|
||||
AbstractHandler <void (Result)> handler) = 0;
|
||||
};
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -17,147 +17,148 @@
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef RIPPLE_PEERFINDER_PEER_H_INCLUDED
|
||||
#define RIPPLE_PEERFINDER_PEER_H_INCLUDED
|
||||
#ifndef RIPPLE_PEERFINDER_SLOTIMP_H_INCLUDED
|
||||
#define RIPPLE_PEERFINDER_SLOTIMP_H_INCLUDED
|
||||
|
||||
#include "../api/Slot.h"
|
||||
|
||||
#include <boost/optional.hpp>
|
||||
|
||||
namespace ripple {
|
||||
namespace PeerFinder {
|
||||
|
||||
/** Metadata for an open peer socket. */
|
||||
class Peer
|
||||
class SlotImp : public Slot
|
||||
{
|
||||
public:
|
||||
enum State
|
||||
{
|
||||
/** Accepted inbound connection, no handshake. */
|
||||
stateAccept,
|
||||
typedef std::shared_ptr <SlotImp> ptr;
|
||||
|
||||
/** Outbound connection attempt. */
|
||||
stateConnect,
|
||||
|
||||
/** Outbound connection, no handshake. */
|
||||
stateConnected,
|
||||
|
||||
/** Active peer (handshake completed). */
|
||||
stateActive,
|
||||
|
||||
/** Graceful close in progress. */
|
||||
stateClosing
|
||||
};
|
||||
|
||||
Peer (IPAddress const& remote_address, bool inbound, bool fixed)
|
||||
: m_inbound (inbound)
|
||||
, m_remote_address (remote_address)
|
||||
, m_state (inbound ? stateAccept : stateConnect)
|
||||
// inbound
|
||||
SlotImp (IP::Endpoint const& local_endpoint,
|
||||
IP::Endpoint const& remote_endpoint, bool fixed)
|
||||
: m_inbound (true)
|
||||
, m_fixed (fixed)
|
||||
, m_cluster (false)
|
||||
, checked (inbound ? false : true)
|
||||
, canAccept (inbound ? false : true)
|
||||
, m_state (accept)
|
||||
, m_remote_endpoint (remote_endpoint)
|
||||
, m_local_endpoint (local_endpoint)
|
||||
, checked (false)
|
||||
, canAccept (false)
|
||||
, connectivityCheckInProgress (false)
|
||||
{
|
||||
}
|
||||
|
||||
/** Returns the local address on the socket if known. */
|
||||
IPAddress const& local_address () const
|
||||
// outbound
|
||||
SlotImp (IP::Endpoint const& remote_endpoint, bool fixed)
|
||||
: m_inbound (false)
|
||||
, m_fixed (fixed)
|
||||
, m_cluster (false)
|
||||
, m_state (connect)
|
||||
, m_remote_endpoint (remote_endpoint)
|
||||
, checked (true)
|
||||
, canAccept (true)
|
||||
, connectivityCheckInProgress (false)
|
||||
{
|
||||
return m_local_address;
|
||||
}
|
||||
|
||||
/** Sets the local address on the socket. */
|
||||
void local_address (IPAddress const& address)
|
||||
~SlotImp ()
|
||||
{
|
||||
consistency_check (is_unspecified (m_local_address));
|
||||
m_local_address = address;
|
||||
}
|
||||
|
||||
/** Returns the remote address on the socket. */
|
||||
IPAddress const& remote_address () const
|
||||
{
|
||||
return m_remote_address;
|
||||
}
|
||||
|
||||
/** Returns `true` if this is an inbound connection. */
|
||||
bool inbound () const
|
||||
{
|
||||
return m_inbound;
|
||||
}
|
||||
|
||||
/** Returns `true` if this is an outbound connection. */
|
||||
bool outbound () const
|
||||
{
|
||||
return ! m_inbound;
|
||||
}
|
||||
|
||||
/** Marks a connection as belonging to a fixed peer. */
|
||||
void fixed (bool fix)
|
||||
{
|
||||
m_fixed = fix;
|
||||
}
|
||||
|
||||
/** Marks `true` if this is a connection belonging to a fixed peer. */
|
||||
bool fixed () const
|
||||
{
|
||||
return m_fixed;
|
||||
}
|
||||
|
||||
void cluster (bool cluster)
|
||||
{
|
||||
m_cluster = cluster;
|
||||
}
|
||||
|
||||
bool cluster () const
|
||||
{
|
||||
return m_cluster;
|
||||
}
|
||||
|
||||
State state() const
|
||||
State state () const
|
||||
{
|
||||
return m_state;
|
||||
}
|
||||
|
||||
void state (State s)
|
||||
IP::Endpoint const& remote_endpoint () const
|
||||
{
|
||||
m_state = s;
|
||||
return m_remote_endpoint;
|
||||
}
|
||||
|
||||
PeerID const& id () const
|
||||
boost::optional <IP::Endpoint> const& local_endpoint () const
|
||||
{
|
||||
return m_id;
|
||||
return m_local_endpoint;
|
||||
}
|
||||
|
||||
void activate (PeerID const& id, DiscreteTime now)
|
||||
boost::optional <RipplePublicKey> const& public_key () const
|
||||
{
|
||||
m_state = stateActive;
|
||||
m_id = id;
|
||||
return m_public_key;
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
void state (State state_)
|
||||
{
|
||||
// Must go through activate() to set active state
|
||||
assert (state_ != active);
|
||||
|
||||
// The state must be different
|
||||
assert (state_ != m_state);
|
||||
|
||||
// You can't transition into the initial states
|
||||
assert (state_ != accept && state_ != connect);
|
||||
|
||||
// Can only become connected from outbound connect state
|
||||
assert (state_ != connected || (! m_inbound && m_state == connect));
|
||||
|
||||
// Can't gracefully close on an outbound connection attempt
|
||||
assert (state_ != closing || m_state != connect);
|
||||
|
||||
m_state = state_;
|
||||
}
|
||||
|
||||
void activate (clock_type::time_point const& now)
|
||||
{
|
||||
// Can only become active from the accept or connected state
|
||||
assert (m_state == accept || m_state == connected);
|
||||
|
||||
m_state = active;
|
||||
whenSendEndpoints = now;
|
||||
whenAcceptEndpoints = now;
|
||||
}
|
||||
|
||||
void local_endpoint (IP::Endpoint const& endpoint)
|
||||
{
|
||||
m_local_endpoint = endpoint;
|
||||
}
|
||||
|
||||
void remote_endpoint (IP::Endpoint const& endpoint)
|
||||
{
|
||||
m_remote_endpoint = endpoint;
|
||||
}
|
||||
|
||||
void public_key (RipplePublicKey const& key)
|
||||
{
|
||||
m_public_key = key;
|
||||
}
|
||||
|
||||
void cluster (bool cluster_)
|
||||
{
|
||||
m_cluster = cluster_;
|
||||
}
|
||||
|
||||
private:
|
||||
// `true` if the connection is incoming
|
||||
bool const m_inbound;
|
||||
|
||||
// The local address on the socket, when it is known.
|
||||
IPAddress m_local_address;
|
||||
|
||||
// The remote address on the socket.
|
||||
IPAddress m_remote_address;
|
||||
|
||||
// Current state of this connection
|
||||
State m_state;
|
||||
|
||||
// The public key. Valid after a handshake.
|
||||
PeerID m_id;
|
||||
|
||||
// Set to indicate that this is a fixed peer.
|
||||
bool m_fixed;
|
||||
|
||||
// Set to indicate that this is a peer that belongs in our cluster
|
||||
// and does not consume a slot. Valid after a handshake.
|
||||
bool const m_fixed;
|
||||
bool m_cluster;
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
State m_state;
|
||||
IP::Endpoint m_remote_endpoint;
|
||||
boost::optional <IP::Endpoint> m_local_endpoint;
|
||||
boost::optional <RipplePublicKey> m_public_key;
|
||||
|
||||
public:
|
||||
// DEPRECATED public data members
|
||||
@@ -175,13 +176,13 @@ public:
|
||||
bool connectivityCheckInProgress;
|
||||
|
||||
// The time after which we will send the peer mtENDPOINTS
|
||||
DiscreteTime whenSendEndpoints;
|
||||
clock_type::time_point whenSendEndpoints;
|
||||
|
||||
// The time after which we will accept mtENDPOINTS from the peer
|
||||
// This is to prevent flooding or spamming. Receipt of mtENDPOINTS
|
||||
// sooner than the allotted time should impose a load charge.
|
||||
//
|
||||
DiscreteTime whenAcceptEndpoints;
|
||||
clock_type::time_point whenAcceptEndpoints;
|
||||
|
||||
// The set of all recent IPAddress that we have seen from this peer.
|
||||
// We try to avoid sending a peer the same addresses they gave us.
|
||||
@@ -189,6 +190,12 @@ public:
|
||||
//std::set <IPAddress> received;
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
Slot::~Slot ()
|
||||
{
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,463 +0,0 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of rippled: https://github.com/ripple/rippled
|
||||
Copyright (c) 2012, 2013 Ripple Labs Inc.
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef RIPPLE_PEERFINDER_SLOTS_H_INCLUDED
|
||||
#define RIPPLE_PEERFINDER_SLOTS_H_INCLUDED
|
||||
|
||||
namespace ripple {
|
||||
namespace PeerFinder {
|
||||
|
||||
class Slots
|
||||
{
|
||||
public:
|
||||
explicit Slots (DiscreteClock <DiscreteTime> clock)
|
||||
: m_clock (clock)
|
||||
, m_inboundSlots (0)
|
||||
, m_inboundActive (0)
|
||||
, m_outboundSlots (0)
|
||||
, m_outboundActive (0)
|
||||
, m_fixedPeerConnections (0)
|
||||
, m_clusterPeerConnections (0)
|
||||
, m_acceptCount (0)
|
||||
, m_connectCount (0)
|
||||
, m_closingCount (0)
|
||||
{
|
||||
#if 0
|
||||
std::random_device rd;
|
||||
std::mt19937 gen (rd());
|
||||
m_roundingThreshold =
|
||||
std::generate_canonical <double, 10> (gen);
|
||||
#else
|
||||
m_roundingThreshold = Random::getSystemRandom().nextDouble();
|
||||
#endif
|
||||
}
|
||||
|
||||
/** Called when the config is set or changed. */
|
||||
void onConfig (Config const& config)
|
||||
{
|
||||
// Calculate the number of outbound peers we want. If we dont want or can't
|
||||
// accept incoming, this will simply be equal to maxPeers. Otherwise
|
||||
// we calculate a fractional amount based on percentages and pseudo-randomly
|
||||
// round up or down.
|
||||
//
|
||||
if (config.wantIncoming)
|
||||
{
|
||||
// Round outPeers upwards using a Bernoulli distribution
|
||||
m_outboundSlots = std::floor (config.outPeers);
|
||||
if (m_roundingThreshold < (config.outPeers - m_outboundSlots))
|
||||
++m_outboundSlots;
|
||||
}
|
||||
else
|
||||
{
|
||||
m_outboundSlots = config.maxPeers;
|
||||
}
|
||||
|
||||
// Calculate the largest number of inbound connections we could take.
|
||||
if (config.maxPeers >= m_outboundSlots)
|
||||
m_inboundSlots = config.maxPeers - m_outboundSlots;
|
||||
else
|
||||
m_inboundSlots = 0;
|
||||
}
|
||||
|
||||
/** Returns the number of accepted connections that haven't handshaked. */
|
||||
int acceptCount() const
|
||||
{
|
||||
return m_acceptCount;
|
||||
}
|
||||
|
||||
/** Returns the number of connection attempts currently active. */
|
||||
int connectCount() const
|
||||
{
|
||||
return m_connectCount;
|
||||
}
|
||||
|
||||
/** Returns the number of connections that are gracefully closing. */
|
||||
int closingCount () const
|
||||
{
|
||||
return m_closingCount;
|
||||
}
|
||||
|
||||
/** Returns the total number of inbound slots. */
|
||||
int inboundSlots () const
|
||||
{
|
||||
return m_inboundSlots;
|
||||
}
|
||||
|
||||
/** Returns the total number of outbound slots. */
|
||||
int outboundSlots () const
|
||||
{
|
||||
return m_outboundSlots;
|
||||
}
|
||||
|
||||
/** Returns the number of inbound peers assigned an open slot. */
|
||||
int inboundActive () const
|
||||
{
|
||||
return m_inboundActive;
|
||||
}
|
||||
|
||||
/** Returns the number of outbound peers assigned an open slot.
|
||||
Fixed peers do not count towards outbound slots used.
|
||||
*/
|
||||
int outboundActive () const
|
||||
{
|
||||
return m_outboundActive;
|
||||
}
|
||||
|
||||
/** Returns the total number of active peers excluding fixed peers. */
|
||||
int totalActive () const
|
||||
{
|
||||
return m_inboundActive + m_outboundActive;
|
||||
}
|
||||
|
||||
/** Returns the number of unused inbound slots.
|
||||
Fixed peers do not deduct from inbound slots or count towards totals.
|
||||
*/
|
||||
int inboundSlotsFree () const
|
||||
{
|
||||
if (m_inboundActive < m_inboundSlots)
|
||||
return m_inboundSlots - m_inboundActive;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Returns the number of unused outbound slots.
|
||||
Fixed peers do not deduct from outbound slots or count towards totals.
|
||||
*/
|
||||
int outboundSlotsFree () const
|
||||
{
|
||||
if (m_outboundActive < m_outboundSlots)
|
||||
return m_outboundSlots - m_outboundActive;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Returns the number of fixed peers we have connections to
|
||||
Fixed peers do not deduct from outbound or inbound slots or count
|
||||
towards totals.
|
||||
*/
|
||||
int fixedPeers () const
|
||||
{
|
||||
return m_fixedPeerConnections;
|
||||
}
|
||||
|
||||
/** Returns the number of cluster peers we have connections to
|
||||
Cluster nodes do not deduct from outbound or inbound slots or
|
||||
count towards totals, but they are tracked if they are also
|
||||
configured as fixed peers.
|
||||
*/
|
||||
int clusterPeers () const
|
||||
{
|
||||
return m_clusterPeerConnections;
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
/** Called when an inbound connection is accepted. */
|
||||
void onPeerAccept ()
|
||||
{
|
||||
++m_acceptCount;
|
||||
}
|
||||
|
||||
/** Called when a new outbound connection is attempted. */
|
||||
void onPeerConnect ()
|
||||
{
|
||||
++m_connectCount;
|
||||
}
|
||||
|
||||
/** Determines if an outbound slot is available and assigns it */
|
||||
HandshakeAction grabOutboundSlot(bool self, bool fixed,
|
||||
bool available, bool cluster)
|
||||
{
|
||||
// If this is a connection to ourselves, we bail.
|
||||
if (self)
|
||||
{
|
||||
++m_closingCount;
|
||||
return doClose;
|
||||
}
|
||||
|
||||
// Fixed and cluster peers are tracked but are not subject
|
||||
// to limits and don't consume slots. They are always allowed
|
||||
// to connect.
|
||||
if (fixed || cluster)
|
||||
{
|
||||
if (fixed)
|
||||
++m_fixedPeerConnections;
|
||||
|
||||
if (cluster)
|
||||
++m_clusterPeerConnections;
|
||||
|
||||
return doActivate;
|
||||
}
|
||||
|
||||
// If we don't have any slots for this peer then reject the
|
||||
// connection.
|
||||
if (!available)
|
||||
{
|
||||
++m_closingCount;
|
||||
return doClose;
|
||||
}
|
||||
|
||||
++m_outboundActive;
|
||||
return doActivate;
|
||||
}
|
||||
|
||||
/** Determines if an inbound slot is available and assigns it */
|
||||
HandshakeAction grabInboundSlot(bool self, bool fixed,
|
||||
bool available, bool cluster)
|
||||
{
|
||||
// If this is a connection to ourselves, we bail.
|
||||
if (self)
|
||||
{
|
||||
++m_closingCount;
|
||||
return doClose;
|
||||
}
|
||||
|
||||
// Fixed and cluster peers are tracked but are not subject
|
||||
// to limits and don't consume slots. They are always allowed
|
||||
// to connect.
|
||||
if (fixed || cluster)
|
||||
{
|
||||
if (fixed)
|
||||
++m_fixedPeerConnections;
|
||||
|
||||
if (cluster)
|
||||
++m_clusterPeerConnections;
|
||||
|
||||
return doActivate;
|
||||
}
|
||||
|
||||
// If we don't have any slots for this peer then reject the
|
||||
// connection and redirect them.
|
||||
if (!available)
|
||||
{
|
||||
++m_closingCount;
|
||||
return doRedirect;
|
||||
}
|
||||
|
||||
++m_inboundActive;
|
||||
return doActivate;
|
||||
}
|
||||
|
||||
/** Called when a peer handshakes.
|
||||
Returns the disposition for this peer, including whether we should
|
||||
activate the connection, issue a redirect or simply close it.
|
||||
*/
|
||||
HandshakeAction onPeerHandshake (bool inbound, bool self, bool fixed, bool cluster)
|
||||
{
|
||||
if (cluster)
|
||||
return doActivate;
|
||||
|
||||
if (inbound)
|
||||
{
|
||||
// Must not be zero!
|
||||
consistency_check (m_acceptCount > 0);
|
||||
--m_acceptCount;
|
||||
|
||||
return grabInboundSlot (self, fixed,
|
||||
inboundSlotsFree () > 0, cluster);
|
||||
}
|
||||
|
||||
// Must not be zero!
|
||||
consistency_check (m_connectCount > 0);
|
||||
--m_connectCount;
|
||||
|
||||
return grabOutboundSlot (self, fixed,
|
||||
outboundSlotsFree () > 0, cluster);
|
||||
}
|
||||
|
||||
/** Called when a peer socket is closed gracefully. */
|
||||
void onPeerGracefulClose ()
|
||||
{
|
||||
// Must not be zero!
|
||||
consistency_check (m_closingCount > 0);
|
||||
--m_closingCount;
|
||||
}
|
||||
|
||||
/** Called when a peer socket is closed.
|
||||
A value of `true` for active means the peer was assigned an open slot.
|
||||
*/
|
||||
void onPeerClosed (bool inbound, bool active, bool fixed, bool cluster)
|
||||
{
|
||||
if (active)
|
||||
{
|
||||
if (inbound)
|
||||
{
|
||||
// Fixed peer connections are tracked but don't count towards slots
|
||||
if (fixed || cluster)
|
||||
{
|
||||
if (fixed)
|
||||
{
|
||||
consistency_check (m_fixedPeerConnections > 0);
|
||||
--m_fixedPeerConnections;
|
||||
}
|
||||
|
||||
if (cluster)
|
||||
{
|
||||
consistency_check (m_clusterPeerConnections > 0);
|
||||
--m_clusterPeerConnections;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// Must not be zero!
|
||||
consistency_check (m_inboundActive > 0);
|
||||
--m_inboundActive;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// Fixed peer connections are tracked but don't count towards slots
|
||||
if (fixed || cluster)
|
||||
{
|
||||
if (fixed)
|
||||
{
|
||||
consistency_check (m_fixedPeerConnections > 0);
|
||||
--m_fixedPeerConnections;
|
||||
}
|
||||
|
||||
if (cluster)
|
||||
{
|
||||
consistency_check (m_clusterPeerConnections > 0);
|
||||
--m_clusterPeerConnections;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// Must not be zero!
|
||||
consistency_check (m_outboundActive > 0);
|
||||
--m_outboundActive;
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (inbound)
|
||||
{
|
||||
// Must not be zero!
|
||||
consistency_check (m_acceptCount > 0);
|
||||
--m_acceptCount;
|
||||
}
|
||||
else
|
||||
{
|
||||
// Must not be zero!
|
||||
consistency_check (m_connectCount > 0);
|
||||
--m_connectCount;
|
||||
}
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
/** Returns the number of new connection attempts we should make. */
|
||||
int additionalAttemptsNeeded () const
|
||||
{
|
||||
// Don't go over the maximum concurrent attempt limit
|
||||
if (m_connectCount >= Tuning::maxConnectAttempts)
|
||||
return 0;
|
||||
int needed (outboundSlotsFree ());
|
||||
// This is the most we could attempt right now
|
||||
int const available (
|
||||
Tuning::maxConnectAttempts - m_connectCount);
|
||||
return std::min (needed, available);
|
||||
}
|
||||
|
||||
/** Returns true if the slot logic considers us "connected" to the network. */
|
||||
bool isConnectedToNetwork () const
|
||||
{
|
||||
// We will consider ourselves connected if we have reached
|
||||
// the number of outgoing connections desired, or if connect
|
||||
// automatically is false.
|
||||
//
|
||||
// Fixed peers do not count towards the active outgoing total.
|
||||
|
||||
if (m_outboundSlots > 0)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/** Output statistics. */
|
||||
void onWrite (PropertyStream::Map& map)
|
||||
{
|
||||
map ["accept"] = acceptCount();
|
||||
map ["connect"] = connectCount();
|
||||
map ["close"] = closingCount();
|
||||
map ["in"] << inboundActive() << "/" << inboundSlots();
|
||||
map ["out"] << outboundActive() << "/" << outboundSlots();
|
||||
map ["fixed"] = fixedPeers();
|
||||
}
|
||||
|
||||
/** Records the state for diagnostics. */
|
||||
std::string state_string () const
|
||||
{
|
||||
std::stringstream ss;
|
||||
ss <<
|
||||
outboundActive() << "/" << outboundSlots() << " out, " <<
|
||||
inboundActive() << "/" << inboundSlots() << " in, " <<
|
||||
connectCount() << " connecting, " <<
|
||||
closingCount() << " closing"
|
||||
;
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
private:
|
||||
DiscreteClock <DiscreteTime> m_clock;
|
||||
|
||||
/** Total number of inbound slots. */
|
||||
int m_inboundSlots;
|
||||
|
||||
/** Number of inbound slots assigned to active peers. */
|
||||
int m_inboundActive;
|
||||
|
||||
/** Total number of outbound slots. */
|
||||
int m_outboundSlots;
|
||||
|
||||
/** Number of outbound slots assigned to active peers. */
|
||||
int m_outboundActive;
|
||||
|
||||
/** Number of fixed peer connections that we have. */
|
||||
int m_fixedPeerConnections;
|
||||
|
||||
/** Number of cluster peer connections that we have. */
|
||||
int m_clusterPeerConnections;
|
||||
|
||||
// Number of inbound connections that are
|
||||
// not active or gracefully closing.
|
||||
int m_acceptCount;
|
||||
|
||||
// Number of outgoing connections that are
|
||||
// not active or gracefully closing.
|
||||
//
|
||||
int m_connectCount;
|
||||
|
||||
// Number of connections that are gracefully closing.
|
||||
int m_closingCount;
|
||||
|
||||
// Number of connections that are currently assigned an open slot
|
||||
//int m_activeCount;
|
||||
|
||||
/** Fractional threshold below which we round down.
|
||||
This is used to round the value of Config::outPeers up or down in
|
||||
such a way that the network-wide average number of outgoing
|
||||
connections approximates the recommended, fractional value.
|
||||
*/
|
||||
double m_roundingThreshold;
|
||||
};
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -32,7 +32,7 @@ public:
|
||||
struct SavedBootstrapAddress
|
||||
{
|
||||
IPAddress address;
|
||||
int cumulativeUptimeSeconds;
|
||||
std::chrono::seconds cumulativeUptime;
|
||||
int connectionValence;
|
||||
};
|
||||
|
||||
|
||||
@@ -91,7 +91,7 @@ public:
|
||||
|
||||
{
|
||||
std::string s;
|
||||
int uptimeSeconds;
|
||||
std::chrono::seconds::rep uptimeSeconds;
|
||||
int connectionValence;
|
||||
|
||||
sqdb::statement st = (m_session.prepare <<
|
||||
@@ -115,7 +115,7 @@ public:
|
||||
|
||||
if (! is_unspecified (entry.address))
|
||||
{
|
||||
entry.cumulativeUptimeSeconds = uptimeSeconds;
|
||||
entry.cumulativeUptime = std::chrono::seconds (uptimeSeconds);
|
||||
entry.connectionValence = connectionValence;
|
||||
|
||||
list.push_back (entry);
|
||||
@@ -153,7 +153,7 @@ public:
|
||||
if (! error)
|
||||
{
|
||||
std::string s;
|
||||
int uptimeSeconds;
|
||||
std::chrono::seconds::rep uptimeSeconds;
|
||||
int connectionValence;
|
||||
|
||||
sqdb::statement st = (m_session.prepare <<
|
||||
@@ -173,7 +173,7 @@ public:
|
||||
list.begin()); !error && iter != list.end(); ++iter)
|
||||
{
|
||||
s = to_string (iter->address);
|
||||
uptimeSeconds = iter->cumulativeUptimeSeconds;
|
||||
uptimeSeconds = iter->cumulativeUptime.count ();
|
||||
connectionValence = iter->connectionValence;
|
||||
|
||||
st.execute_and_fetch (error);
|
||||
|
||||
@@ -20,6 +20,8 @@
|
||||
#ifndef RIPPLE_PEERFINDER_TUNING_H_INCLUDED
|
||||
#define RIPPLE_PEERFINDER_TUNING_H_INCLUDED
|
||||
|
||||
#include <array>
|
||||
|
||||
namespace ripple {
|
||||
namespace PeerFinder {
|
||||
|
||||
@@ -39,7 +41,7 @@ enum
|
||||
secondsPerConnect = 10
|
||||
|
||||
/** Maximum number of simultaneous connection attempts. */
|
||||
,maxConnectAttempts = 5
|
||||
,maxConnectAttempts = 20
|
||||
|
||||
/** The percentage of total peer slots that are outbound.
|
||||
The number of outbound peers will be the larger of the
|
||||
@@ -59,33 +61,57 @@ enum
|
||||
|
||||
//---------------------------------------------------------
|
||||
//
|
||||
// Bootcache
|
||||
// LegacyEndpoints
|
||||
//
|
||||
//---------------------------------------------------------
|
||||
|
||||
// How many legacy endpoints to keep in our cache
|
||||
,legacyEndpointCacheSize = 1000
|
||||
|
||||
// How many cache mutations between each database update
|
||||
,legacyEndpointMutationsPerUpdate = 50
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
//
|
||||
// Fixed
|
||||
//
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
static std::array <int, 10> const connectionBackoff
|
||||
{{ 1, 1, 2, 3, 5, 8, 13, 21, 34, 55 }};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
//
|
||||
// Bootcache
|
||||
//
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
enum
|
||||
{
|
||||
// Threshold of cache entries above which we trim.
|
||||
,bootcacheSize = 1000
|
||||
bootcacheSize = 1000
|
||||
|
||||
// The percentage of addresses we prune when we trim the cache.
|
||||
,bootcachePrunePercent = 10
|
||||
};
|
||||
|
||||
// The cool down wait between database updates
|
||||
// Ideally this should be larger than the time it takes a full
|
||||
// peer to send us a set of addresses and then disconnect.
|
||||
//
|
||||
,bootcacheCooldownSeconds = 60
|
||||
// The cool down wait between database updates
|
||||
// Ideally this should be larger than the time it takes a full
|
||||
// peer to send us a set of addresses and then disconnect.
|
||||
//
|
||||
static std::chrono::seconds const bootcacheCooldownTime (60);
|
||||
|
||||
//---------------------------------------------------------
|
||||
//
|
||||
// Livecache
|
||||
//
|
||||
//---------------------------------------------------------
|
||||
//------------------------------------------------------------------------------
|
||||
//
|
||||
// Livecache
|
||||
//
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
enum
|
||||
{
|
||||
// Drop incoming messages with hops greater than this number
|
||||
,maxHops = 10
|
||||
|
||||
// How often we send or accept mtENDPOINTS messages per peer
|
||||
,secondsPerMessage = 5
|
||||
maxHops = 10
|
||||
|
||||
// How many Endpoint to send in each mtENDPOINTS
|
||||
,numberOfEndpoints = 10
|
||||
@@ -93,10 +119,6 @@ enum
|
||||
// The most Endpoint we will accept in mtENDPOINTS
|
||||
,numberOfEndpointsMax = 20
|
||||
|
||||
// How long an Endpoint will stay in the cache
|
||||
// This should be a small multiple of the broadcast frequency
|
||||
,liveCacheSecondsToLive = 60
|
||||
|
||||
// The maximum number of hops that we allow. Peers farther
|
||||
// away than this are dropped.
|
||||
,maxPeerHopCount = 10
|
||||
@@ -107,22 +129,16 @@ enum
|
||||
|
||||
/** Number of addresses we provide when redirecting. */
|
||||
,redirectEndpointCount = 10
|
||||
|
||||
//---------------------------------------------------------
|
||||
//
|
||||
// LegacyEndpoints
|
||||
//
|
||||
//---------------------------------------------------------
|
||||
|
||||
// How many legacy endpoints to keep in our cache
|
||||
,legacyEndpointCacheSize = 1000
|
||||
|
||||
// How many cache mutations between each database update
|
||||
,legacyEndpointMutationsPerUpdate = 50
|
||||
};
|
||||
/** @} */
|
||||
|
||||
// How often we send or accept mtENDPOINTS messages per peer
|
||||
static std::chrono::seconds const secondsPerMessage (5);
|
||||
|
||||
// How long an Endpoint will stay in the cache
|
||||
// This should be a small multiple of the broadcast frequency
|
||||
static std::chrono::seconds const liveCacheSecondsToLive (60);
|
||||
}
|
||||
/** @} */
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,7 +22,6 @@
|
||||
#include "ripple_peerfinder.h"
|
||||
|
||||
#include "../../ripple/algorithm/api/CycledSet.h"
|
||||
#include "../../ripple/algorithm/api/DiscreteClock.h"
|
||||
#include "../../ripple/common/Resolver.h"
|
||||
|
||||
#include <deque>
|
||||
@@ -60,29 +59,26 @@ using namespace beast;
|
||||
|
||||
# include "impl/Tuning.h"
|
||||
# include "impl/Checker.h"
|
||||
# include "impl/Resolver.h"
|
||||
#include "impl/CheckerAdapter.h"
|
||||
# include "impl/Sorts.h"
|
||||
# include "impl/Giveaways.h"
|
||||
# include "impl/Livecache.h"
|
||||
# include "impl/Slots.h"
|
||||
# include "impl/SlotImp.h"
|
||||
# include "impl/Counts.h"
|
||||
# include "impl/Source.h"
|
||||
#include "impl/SourceStrings.h"
|
||||
# include "impl/Store.h"
|
||||
# include "impl/Bootcache.h"
|
||||
# include "impl/Peer.h"
|
||||
//# include "impl/Peer.h"
|
||||
#include "impl/StoreSqdb.h"
|
||||
# include "impl/Reporting.h"
|
||||
#include "impl/FixedPeer.h"
|
||||
# include "impl/Logic.h"
|
||||
#include "impl/LogicType.h"
|
||||
#include "impl/Logic.h"
|
||||
|
||||
#include "impl/Checker.cpp"
|
||||
#include "impl/Config.cpp"
|
||||
#include "impl/Endpoint.cpp"
|
||||
#include "impl/Livecache.cpp"
|
||||
#include "impl/Manager.cpp"
|
||||
#include "impl/Resolver.cpp"
|
||||
#include "impl/SourceStrings.cpp"
|
||||
|
||||
//#include "sim/sync_timer.h"
|
||||
|
||||
@@ -30,6 +30,7 @@ using namespace beast;
|
||||
|
||||
#include "../types/api/RipplePublicKey.h"
|
||||
|
||||
#include "api/Slot.h"
|
||||
# include "api/Endpoint.h"
|
||||
# include "api/Types.h"
|
||||
#include "api/Callback.h"
|
||||
|
||||
@@ -56,19 +56,19 @@ is_remote_node_pred <Node> is_remote_node (Node const* node)
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
/** UnaryPredicate, `true` if the remote address matches. */
|
||||
class is_remote_address
|
||||
class is_remote_endpoint
|
||||
{
|
||||
public:
|
||||
explicit is_remote_address (IPAddress const& address)
|
||||
: m_address (address)
|
||||
explicit is_remote_endpoint (IPAddress const& address)
|
||||
: m_endpoint (address)
|
||||
{ }
|
||||
template <typename Link>
|
||||
bool operator() (Link const& link) const
|
||||
{
|
||||
return link.remote_address() == m_address;
|
||||
return link.remote_endpoint() == m_endpoint;
|
||||
}
|
||||
private:
|
||||
IPAddress const m_address;
|
||||
IPAddress const m_endpoint;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
@@ -17,6 +17,8 @@
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#if 0
|
||||
|
||||
namespace ripple {
|
||||
namespace PeerFinder {
|
||||
namespace Sim {
|
||||
@@ -37,7 +39,7 @@ public:
|
||||
typedef std::list <Node> Peers;
|
||||
|
||||
typedef boost::unordered_map <
|
||||
IPAddress, boost::reference_wrapper <Node>> Table;
|
||||
IP::Endpoint, boost::reference_wrapper <Node>> Table;
|
||||
|
||||
explicit Network (Params const& params,
|
||||
Journal journal = Journal());
|
||||
@@ -48,10 +50,10 @@ public:
|
||||
void prepare ();
|
||||
Journal journal () const;
|
||||
int next_node_id ();
|
||||
DiscreteTime now ();
|
||||
clock_type::time_point now ();
|
||||
Peers& nodes();
|
||||
Peers const& nodes() const;
|
||||
Node* find (IPAddress const& address);
|
||||
Node* find (IP::Endpoint const& address);
|
||||
void step ();
|
||||
|
||||
template <typename Function>
|
||||
@@ -62,7 +64,7 @@ private:
|
||||
Params m_params;
|
||||
Journal m_journal;
|
||||
int m_next_node_id;
|
||||
ManualClock m_clock_source;
|
||||
manual_clock <std::chrono::seconds> m_clock;
|
||||
Peers m_nodes;
|
||||
Table m_table;
|
||||
FunctionQueue m_queue;
|
||||
@@ -82,14 +84,16 @@ public:
|
||||
|
||||
Link (
|
||||
Node& local_node,
|
||||
IPAddress const& local_address,
|
||||
SlotImp::ptr const& slot,
|
||||
IP::Endpoint const& local_endpoint,
|
||||
Node& remote_node,
|
||||
IPAddress const& remote_address,
|
||||
IP::Endpoint const& remote_endpoint,
|
||||
bool inbound)
|
||||
: m_local_node (&local_node)
|
||||
, m_local_address (local_address)
|
||||
, m_slot (slot)
|
||||
, m_local_endpoint (local_endpoint)
|
||||
, m_remote_node (&remote_node)
|
||||
, m_remote_address (remote_address)
|
||||
, m_remote_endpoint (remote_endpoint)
|
||||
, m_inbound (inbound)
|
||||
, m_closed (false)
|
||||
{
|
||||
@@ -101,9 +105,10 @@ public:
|
||||
bool inbound () const { return m_inbound; }
|
||||
bool outbound () const { return ! m_inbound; }
|
||||
|
||||
IPAddress const& remote_address() const { return m_remote_address; }
|
||||
IPAddress const& local_address() const { return m_local_address; }
|
||||
IP::Endpoint const& remote_endpoint() const { return m_remote_endpoint; }
|
||||
IP::Endpoint const& local_endpoint() const { return m_local_endpoint; }
|
||||
|
||||
SlotImp::ptr const& slot () const { return m_slot; }
|
||||
Node& remote_node () { return *m_remote_node; }
|
||||
Node const& remote_node () const { return *m_remote_node; }
|
||||
Node& local_node () { return *m_local_node; }
|
||||
@@ -133,9 +138,10 @@ public:
|
||||
|
||||
private:
|
||||
Node* m_local_node;
|
||||
IPAddress m_local_address;
|
||||
SlotImp::ptr m_slot;
|
||||
IP::Endpoint m_local_endpoint;
|
||||
Node* m_remote_node;
|
||||
IPAddress m_remote_address;
|
||||
IP::Endpoint m_remote_endpoint;
|
||||
bool m_inbound;
|
||||
bool m_closed;
|
||||
Messages m_current;
|
||||
@@ -161,8 +167,8 @@ public:
|
||||
}
|
||||
|
||||
bool canAccept;
|
||||
IPAddress listening_address;
|
||||
IPAddress well_known_address;
|
||||
IP::Endpoint listening_endpoint;
|
||||
IP::Endpoint well_known_endpoint;
|
||||
PeerFinder::Config config;
|
||||
};
|
||||
|
||||
@@ -172,17 +178,17 @@ public:
|
||||
Node (
|
||||
Network& network,
|
||||
Config const& config,
|
||||
DiscreteClock <DiscreteTime> clock,
|
||||
clock_type& clock,
|
||||
Journal journal)
|
||||
: m_network (network)
|
||||
, m_id (network.next_node_id())
|
||||
, m_config (config)
|
||||
, m_node_id (PeerID::createFromInteger (m_id))
|
||||
, m_node_id (RipplePublicKey::createFromInteger (m_id))
|
||||
, m_sink (prefix(), journal.sink())
|
||||
, m_journal (Journal (m_sink, journal.severity()), Reporting::node)
|
||||
, m_next_port (m_config.listening_address.port() + 1)
|
||||
, m_next_port (m_config.listening_endpoint.port() + 1)
|
||||
, m_logic (boost::in_place (
|
||||
clock, boost::ref (*this), boost::ref (*this), boost::ref (*this), m_journal))
|
||||
boost::ref (clock), boost::ref (*this), boost::ref (*this), boost::ref (*this), m_journal))
|
||||
, m_whenSweep (m_network.now() + Tuning::liveCacheSecondsToLive)
|
||||
{
|
||||
logic().setConfig (m_config.config);
|
||||
@@ -197,7 +203,7 @@ public:
|
||||
|
||||
void dump (Journal::ScopedStream& ss) const
|
||||
{
|
||||
ss << listening_address();
|
||||
ss << listening_endpoint();
|
||||
logic().dump (ss);
|
||||
}
|
||||
|
||||
@@ -216,7 +222,7 @@ public:
|
||||
return m_id;
|
||||
}
|
||||
|
||||
PeerID const& node_id () const
|
||||
RipplePublicKey const& node_id () const
|
||||
{
|
||||
return m_node_id;
|
||||
}
|
||||
@@ -231,9 +237,9 @@ public:
|
||||
return m_logic.get();
|
||||
}
|
||||
|
||||
IPAddress const& listening_address () const
|
||||
IP::Endpoint const& listening_endpoint () const
|
||||
{
|
||||
return m_config.listening_address;
|
||||
return m_config.listening_endpoint;
|
||||
}
|
||||
|
||||
bool canAccept () const
|
||||
@@ -243,7 +249,7 @@ public:
|
||||
|
||||
void receive (Link const& c, Message const& m)
|
||||
{
|
||||
logic().onPeerEndpoints (c.remote_address(), m.payload());
|
||||
logic().on_endpoints (c.slot (), m.payload());
|
||||
}
|
||||
|
||||
void pre_step ()
|
||||
@@ -268,8 +274,8 @@ public:
|
||||
if (iter->closed ())
|
||||
{
|
||||
// Post notification?
|
||||
iter->local_node().logic().onPeerClosed (
|
||||
iter->remote_address());
|
||||
iter->local_node().logic().on_closed (
|
||||
iter->remote_endpoint());
|
||||
iter = links().erase (iter);
|
||||
}
|
||||
else
|
||||
@@ -297,11 +303,11 @@ public:
|
||||
//
|
||||
//----------------------------------------------------------------------
|
||||
|
||||
void sendEndpoints (IPAddress const& remote_address,
|
||||
void sendEndpoints (IP::Endpoint const& remote_endpoint,
|
||||
Endpoints const& endpoints)
|
||||
{
|
||||
m_network.post (std::bind (&Node::doSendEndpoints, this,
|
||||
remote_address, endpoints));
|
||||
remote_endpoint, endpoints));
|
||||
}
|
||||
|
||||
void connectPeers (IPAddresses const& addresses)
|
||||
@@ -310,23 +316,23 @@ public:
|
||||
addresses));
|
||||
}
|
||||
|
||||
void disconnectPeer (IPAddress const& remote_address, bool graceful)
|
||||
void disconnectPeer (IP::Endpoint const& remote_endpoint, bool graceful)
|
||||
{
|
||||
m_network.post (std::bind (&Node::doDisconnectPeer, this,
|
||||
remote_address, graceful));
|
||||
remote_endpoint, graceful));
|
||||
}
|
||||
|
||||
void activatePeer (IPAddress const& remote_address)
|
||||
void activatePeer (IP::Endpoint const& remote_endpoint)
|
||||
{
|
||||
/* no underlying peer to activate */
|
||||
}
|
||||
|
||||
void doSendEndpoints (IPAddress const& remote_address,
|
||||
void doSendEndpoints (IP::Endpoint const& remote_endpoint,
|
||||
Endpoints const& endpoints)
|
||||
{
|
||||
Links::iterator const iter1 (std::find_if (
|
||||
links().begin (), links().end(),
|
||||
is_remote_address (remote_address)));
|
||||
is_remote_endpoint (remote_endpoint)));
|
||||
if (iter1 != links().end())
|
||||
{
|
||||
// Drop the message if they closed their end
|
||||
@@ -336,7 +342,7 @@ public:
|
||||
// Find their link to us
|
||||
Links::iterator const iter2 (std::find_if (
|
||||
remote_node.links().begin(), remote_node.links().end(),
|
||||
is_remote_address (iter1->local_address ())));
|
||||
is_remote_endpoint (iter1->local_endpoint ())));
|
||||
consistency_check (iter2 != remote_node.links().end());
|
||||
|
||||
//
|
||||
@@ -349,19 +355,19 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
void doCheckAccept (Node& remote_node, IPAddress const& remote_address)
|
||||
void doCheckAccept (Node& remote_node, IP::Endpoint const& remote_endpoint)
|
||||
{
|
||||
// Find our link to the remote node
|
||||
Links::iterator iter (std::find_if (m_links.begin (),
|
||||
m_links.end(), is_remote_address (remote_address)));
|
||||
m_links.end(), is_remote_endpoint (remote_endpoint)));
|
||||
// See if the logic closed the connection
|
||||
if (iter == m_links.end())
|
||||
return;
|
||||
// Post notifications
|
||||
m_network.post (std::bind (&Logic::onPeerHandshake,
|
||||
&remote_node.logic(), iter->local_address(), node_id(), false));
|
||||
m_network.post (std::bind (&Logic::onPeerHandshake,
|
||||
&logic(), remote_address, remote_node.node_id(), false));
|
||||
m_network.post (std::bind (&Logic::on_handshake,
|
||||
&remote_node.logic(), iter->local_endpoint(), node_id(), false));
|
||||
m_network.post (std::bind (&Logic::on_handshake,
|
||||
&logic(), remote_endpoint, remote_node.node_id(), false));
|
||||
}
|
||||
|
||||
void doConnectPeers (IPAddresses const& addresses)
|
||||
@@ -369,43 +375,49 @@ public:
|
||||
for (IPAddresses::const_iterator iter (addresses.begin());
|
||||
iter != addresses.end(); ++iter)
|
||||
{
|
||||
IPAddress const& remote_address (*iter);
|
||||
Node* const remote_node (m_network.find (remote_address));
|
||||
// Post notification
|
||||
m_network.post (std::bind (&Logic::onPeerConnect,
|
||||
&logic(), remote_address));
|
||||
IP::Endpoint const& remote_endpoint (*iter);
|
||||
Node* const remote_node (m_network.find (remote_endpoint));
|
||||
// Acquire slot
|
||||
Slot::ptr const local_slot (
|
||||
m_logic->new_outbound_slot (remote_endpoint));
|
||||
if (! local_slot)
|
||||
continue;
|
||||
// See if the address is connectible
|
||||
if (remote_node == nullptr || ! remote_node->canAccept())
|
||||
{
|
||||
// Firewalled or no one listening
|
||||
// Post notification
|
||||
m_network.post (std::bind (&Logic::onPeerClosed,
|
||||
&logic(), remote_address));
|
||||
m_network.post (std::bind (&Logic::on_closed,
|
||||
&logic(), local_slot));
|
||||
continue;
|
||||
}
|
||||
IP::Endpoint const local_endpoint (
|
||||
listening_endpoint().at_port (m_next_port++));
|
||||
// Acquire slot
|
||||
Slot::ptr const remote_slot (
|
||||
remote_node->logic().new_inbound_slot (
|
||||
remote_endpoint, local_endpoint));
|
||||
if (! remote_slot)
|
||||
continue;
|
||||
// Connection established, create links
|
||||
IPAddress const local_address (
|
||||
listening_address().at_port (m_next_port++));
|
||||
m_links.emplace_back (*this, local_address,
|
||||
*remote_node, remote_address, false);
|
||||
remote_node->m_links.emplace_back (*remote_node,
|
||||
remote_address, *this, local_address, true);
|
||||
m_links.emplace_back (*this, local_slot, local_endpoint,
|
||||
*remote_node, remote_endpoint, false);
|
||||
remote_node->m_links.emplace_back (*remote_node, remote_slot,
|
||||
remote_endpoint, *this, local_endpoint, true);
|
||||
// Post notifications
|
||||
m_network.post (std::bind (&Logic::onPeerConnected,
|
||||
&logic(), local_address, remote_address));
|
||||
m_network.post (std::bind (&Logic::onPeerAccept,
|
||||
&remote_node->logic(), remote_address, local_address));
|
||||
m_network.post (std::bind (&Logic::on_connected,
|
||||
&logic(), local_endpoint, remote_endpoint));
|
||||
m_network.post (std::bind (&Node::doCheckAccept,
|
||||
remote_node, boost::ref (*this), local_address));
|
||||
remote_node, boost::ref (*this), local_endpoint));
|
||||
}
|
||||
}
|
||||
|
||||
void doClosed (IPAddress const& remote_address, bool graceful)
|
||||
void doClosed (IP::Endpoint const& remote_endpoint, bool graceful)
|
||||
{
|
||||
// Find our link to them
|
||||
Links::iterator const iter (std::find_if (
|
||||
m_links.begin(), m_links.end(),
|
||||
is_remote_address (remote_address)));
|
||||
is_remote_endpoint (remote_endpoint)));
|
||||
// Must be connected!
|
||||
check_invariant (iter != m_links.end());
|
||||
// Must be closed!
|
||||
@@ -413,46 +425,46 @@ public:
|
||||
// Remove our link to them
|
||||
m_links.erase (iter);
|
||||
// Notify
|
||||
m_network.post (std::bind (&Logic::onPeerClosed,
|
||||
&logic(), remote_address));
|
||||
m_network.post (std::bind (&Logic::on_closed,
|
||||
&logic(), remote_endpoint));
|
||||
}
|
||||
|
||||
void doDisconnectPeer (IPAddress const& remote_address, bool graceful)
|
||||
void doDisconnectPeer (IP::Endpoint const& remote_endpoint, bool graceful)
|
||||
{
|
||||
// Find our link to them
|
||||
Links::iterator const iter1 (std::find_if (
|
||||
m_links.begin(), m_links.end(),
|
||||
is_remote_address (remote_address)));
|
||||
is_remote_endpoint (remote_endpoint)));
|
||||
if (iter1 == m_links.end())
|
||||
return;
|
||||
Node& remote_node (iter1->remote_node());
|
||||
IPAddress const local_address (iter1->local_address());
|
||||
IP::Endpoint const local_endpoint (iter1->local_endpoint());
|
||||
// Find their link to us
|
||||
Links::iterator const iter2 (std::find_if (
|
||||
remote_node.links().begin(), remote_node.links().end(),
|
||||
is_remote_address (local_address)));
|
||||
is_remote_endpoint (local_endpoint)));
|
||||
if (iter2 != remote_node.links().end())
|
||||
{
|
||||
// Notify the remote that we closed
|
||||
check_invariant (! iter2->closed());
|
||||
iter2->close();
|
||||
m_network.post (std::bind (&Node::doClosed,
|
||||
&remote_node, local_address, graceful));
|
||||
&remote_node, local_endpoint, graceful));
|
||||
}
|
||||
if (! iter1->closed ())
|
||||
{
|
||||
// Remove our link to them
|
||||
m_links.erase (iter1);
|
||||
// Notify
|
||||
m_network.post (std::bind (&Logic::onPeerClosed,
|
||||
&logic(), remote_address));
|
||||
m_network.post (std::bind (&Logic::on_closed,
|
||||
&logic(), remote_endpoint));
|
||||
}
|
||||
|
||||
/*
|
||||
if (! graceful || ! iter2->pending ())
|
||||
{
|
||||
remote_node.links().erase (iter2);
|
||||
remote_node.logic().onPeerClosed (local_address);
|
||||
remote_node.logic().on_closed (local_endpoint);
|
||||
}
|
||||
*/
|
||||
}
|
||||
@@ -467,8 +479,8 @@ public:
|
||||
{
|
||||
std::vector <SavedBootstrapAddress> result;
|
||||
SavedBootstrapAddress item;
|
||||
item.address = m_config.well_known_address;
|
||||
item.cumulativeUptimeSeconds = 0;
|
||||
item.address = m_config.well_known_endpoint;
|
||||
item.cumulativeUptime = std::chrono::seconds (0);
|
||||
item.connectionValence = 0;
|
||||
result.push_back (item);
|
||||
return result;
|
||||
@@ -488,7 +500,7 @@ public:
|
||||
{
|
||||
}
|
||||
|
||||
void async_test (IPAddress const& address,
|
||||
void async_test (IP::Endpoint const& address,
|
||||
AbstractHandler <void (Result)> handler)
|
||||
{
|
||||
Node* const node (m_network.find (address));
|
||||
@@ -516,12 +528,12 @@ private:
|
||||
Network& m_network;
|
||||
int const m_id;
|
||||
Config const m_config;
|
||||
PeerID m_node_id;
|
||||
RipplePublicKey m_node_id;
|
||||
WrappedSink m_sink;
|
||||
Journal m_journal;
|
||||
IP::Port m_next_port;
|
||||
boost::optional <Logic> m_logic;
|
||||
DiscreteTime m_whenSweep;
|
||||
clock_type::time_point m_whenSweep;
|
||||
SavedBootstrapAddresses m_bootstrap_cache;
|
||||
};
|
||||
|
||||
@@ -537,13 +549,13 @@ void Link::step ()
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
static IPAddress next_address (IPAddress address)
|
||||
static IP::Endpoint next_endpoint (IP::Endpoint address)
|
||||
{
|
||||
if (address.is_v4())
|
||||
{
|
||||
do
|
||||
{
|
||||
address = IPAddress (IP::AddressV4 (
|
||||
address = IP::Endpoint (IP::AddressV4 (
|
||||
address.to_v4().value + 1)).at_port (address.port());
|
||||
}
|
||||
while (! is_public (address));
|
||||
@@ -554,7 +566,7 @@ static IPAddress next_address (IPAddress address)
|
||||
bassert (address.is_v6());
|
||||
// unimplemented
|
||||
bassertfalse;
|
||||
return IPAddress();
|
||||
return IP::Endpoint();
|
||||
}
|
||||
|
||||
Network::Network (
|
||||
@@ -569,9 +581,9 @@ Network::Network (
|
||||
|
||||
void Network::prepare ()
|
||||
{
|
||||
IPAddress const well_known_address (
|
||||
IPAddress::from_string ("1.0.0.1").at_port (1));
|
||||
IPAddress address (well_known_address);
|
||||
IP::Endpoint const well_known_endpoint (
|
||||
IP::Endpoint::from_string ("1.0.0.1").at_port (1));
|
||||
IP::Endpoint address (well_known_endpoint);
|
||||
|
||||
for (int i = 0; i < params().nodes; ++i )
|
||||
{
|
||||
@@ -579,8 +591,8 @@ void Network::prepare ()
|
||||
{
|
||||
Node::Config config;
|
||||
config.canAccept = true;
|
||||
config.listening_address = address;
|
||||
config.well_known_address = well_known_address;
|
||||
config.listening_endpoint = address;
|
||||
config.well_known_endpoint = well_known_endpoint;
|
||||
config.config.maxPeers = params().maxPeers;
|
||||
config.config.outPeers = params().outPeers;
|
||||
config.config.wantIncoming = true;
|
||||
@@ -589,10 +601,10 @@ void Network::prepare ()
|
||||
m_nodes.emplace_back (
|
||||
*this,
|
||||
config,
|
||||
m_clock_source,
|
||||
m_clock,
|
||||
m_journal);
|
||||
m_table.emplace (address, boost::ref (m_nodes.back()));
|
||||
address = next_address (address);
|
||||
address = next_endpoint (address);
|
||||
}
|
||||
|
||||
if (i != 0)
|
||||
@@ -600,8 +612,8 @@ void Network::prepare ()
|
||||
Node::Config config;
|
||||
config.canAccept = Random::getSystemRandom().nextInt (100) >=
|
||||
(m_params.firewalled * 100);
|
||||
config.listening_address = address;
|
||||
config.well_known_address = well_known_address;
|
||||
config.listening_endpoint = address;
|
||||
config.well_known_endpoint = well_known_endpoint;
|
||||
config.config.maxPeers = params().maxPeers;
|
||||
config.config.outPeers = params().outPeers;
|
||||
config.config.wantIncoming = true;
|
||||
@@ -610,10 +622,10 @@ void Network::prepare ()
|
||||
m_nodes.emplace_back (
|
||||
*this,
|
||||
config,
|
||||
m_clock_source,
|
||||
m_clock,
|
||||
m_journal);
|
||||
m_table.emplace (address, boost::ref (m_nodes.back()));
|
||||
address = next_address (address);
|
||||
address = next_endpoint (address);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -632,9 +644,9 @@ int Network::next_node_id ()
|
||||
return m_next_node_id++;
|
||||
}
|
||||
|
||||
DiscreteTime Network::now ()
|
||||
clock_type::time_point Network::now ()
|
||||
{
|
||||
return m_clock_source();
|
||||
return m_clock.now();
|
||||
}
|
||||
|
||||
Network::Peers& Network::nodes()
|
||||
@@ -649,7 +661,7 @@ Network::Peers const& Network::nodes() const
|
||||
}
|
||||
#endif
|
||||
|
||||
Node* Network::find (IPAddress const& address)
|
||||
Node* Network::find (IP::Endpoint const& address)
|
||||
{
|
||||
Table::iterator iter (m_table.find (address));
|
||||
if (iter != m_table.end())
|
||||
@@ -672,8 +684,8 @@ void Network::step ()
|
||||
// Advance the manual clock so that
|
||||
// messages are broadcast at every step.
|
||||
//
|
||||
//m_clock_source.now() += Tuning::secondsPerConnect;
|
||||
m_clock_source.now() += 1;
|
||||
//m_clock += Tuning::secondsPerConnect;
|
||||
++m_clock;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
@@ -695,7 +707,7 @@ struct PeerStats
|
||||
{
|
||||
PeerStats ()
|
||||
: inboundActive (0)
|
||||
, outboundActive (0)
|
||||
, out_active (0)
|
||||
, inboundSlotsFree (0)
|
||||
, outboundSlotsFree (0)
|
||||
{
|
||||
@@ -704,26 +716,26 @@ struct PeerStats
|
||||
template <typename Peer>
|
||||
explicit PeerStats (Peer const& peer)
|
||||
{
|
||||
inboundActive = peer.logic().slots().inboundActive();
|
||||
outboundActive = peer.logic().slots().outboundActive();
|
||||
inboundSlotsFree = peer.logic().slots().inboundSlotsFree();
|
||||
outboundSlotsFree = peer.logic().slots().outboundSlotsFree();
|
||||
inboundActive = peer.logic().counts().inboundActive();
|
||||
out_active = peer.logic().counts().out_active();
|
||||
inboundSlotsFree = peer.logic().counts().inboundSlotsFree();
|
||||
outboundSlotsFree = peer.logic().counts().outboundSlotsFree();
|
||||
}
|
||||
|
||||
PeerStats& operator+= (PeerStats const& rhs)
|
||||
{
|
||||
inboundActive += rhs.inboundActive;
|
||||
outboundActive += rhs.outboundActive;
|
||||
out_active += rhs.out_active;
|
||||
inboundSlotsFree += rhs.inboundSlotsFree;
|
||||
outboundSlotsFree += rhs.outboundSlotsFree;
|
||||
return *this;
|
||||
}
|
||||
|
||||
int totalActive () const
|
||||
{ return inboundActive + outboundActive; }
|
||||
{ return inboundActive + out_active; }
|
||||
|
||||
int inboundActive;
|
||||
int outboundActive;
|
||||
int out_active;
|
||||
int inboundSlotsFree;
|
||||
int outboundSlotsFree;
|
||||
};
|
||||
@@ -766,7 +778,7 @@ public:
|
||||
double outPeers () const
|
||||
{
|
||||
if (m_size > 0)
|
||||
return double (m_stats.outboundActive) / m_size;
|
||||
return double (m_stats.out_active) / m_size;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -857,10 +869,10 @@ void report_nodes (NodeSequence const& nodes, Journal::Stream const& stream)
|
||||
Logic::State const& state (logic.state());
|
||||
stream <<
|
||||
rfield (node.id ()) <<
|
||||
rfield (state.slots.totalActive ()) <<
|
||||
rfield (state.slots.inboundActive ()) <<
|
||||
rfield (state.slots.outboundActive ()) <<
|
||||
rfield (state.slots.connectCount ()) <<
|
||||
rfield (state.counts.totalActive ()) <<
|
||||
rfield (state.counts.inboundActive ()) <<
|
||||
rfield (state.counts.out_active ()) <<
|
||||
rfield (state.counts.connectCount ()) <<
|
||||
rfield (state.livecache.size ()) <<
|
||||
rfield (state.bootcache.size ())
|
||||
;
|
||||
@@ -1024,7 +1036,7 @@ public:
|
||||
}
|
||||
n.journal().info <<
|
||||
divider () << std::endl <<
|
||||
"Time " << n.now () << std::endl <<
|
||||
"Time " << n.now ().time_since_epoch () << std::endl <<
|
||||
divider ()
|
||||
;
|
||||
|
||||
@@ -1049,7 +1061,7 @@ public:
|
||||
ss << std::endl <<
|
||||
"--------------" << std::endl <<
|
||||
"#" << node.id() <<
|
||||
" at " << node.listening_address ();
|
||||
" at " << node.listening_endpoint ();
|
||||
node.logic().dump (ss);
|
||||
}
|
||||
}
|
||||
@@ -1082,3 +1094,5 @@ static PeerFinderTests peerFinderTests;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
@@ -37,10 +37,7 @@ class Peers;
|
||||
|
||||
/** Represents a peer connection in the overlay.
|
||||
*/
|
||||
class Peer
|
||||
: public boost::enable_shared_from_this <Peer>
|
||||
, public List <Peer>::Node
|
||||
, private LeakChecked <Peer>
|
||||
class Peer : private LeakChecked <Peer>
|
||||
{
|
||||
public:
|
||||
typedef boost::shared_ptr <Peer> Ptr;
|
||||
@@ -74,28 +71,12 @@ public:
|
||||
};
|
||||
|
||||
public:
|
||||
static void accept (
|
||||
boost::shared_ptr <NativeSocketType> const& socket,
|
||||
Peers& peers,
|
||||
Resource::Manager& resourceManager,
|
||||
PeerFinder::Manager& peerFinder,
|
||||
boost::asio::ssl::context& ctx,
|
||||
bool proxyHandshake);
|
||||
|
||||
static void connect (
|
||||
IP::Endpoint const& address,
|
||||
boost::asio::io_service& io_service,
|
||||
Peers& peers,
|
||||
Resource::Manager& resourceManager,
|
||||
PeerFinder::Manager& peerFinder,
|
||||
boost::asio::ssl::context& ssl_context);
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
/** Called when an open slot is assigned to a handshaked peer. */
|
||||
virtual void activate () = 0;
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
//virtual void connect (IPAddress const &address) = 0;
|
||||
//virtual void connect (IP::Endpoint const &address) = 0;
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
virtual State state () const = 0;
|
||||
@@ -103,7 +84,7 @@ public:
|
||||
virtual void state (State new_state) = 0;
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
virtual void detach (const char*, bool onIOStrand) = 0;
|
||||
//virtual void detach (const char*, bool onIOStrand) = 0;
|
||||
|
||||
virtual void sendPacket (const PackedMessage::pointer& packet, bool onStrand) = 0;
|
||||
|
||||
@@ -150,7 +131,7 @@ public:
|
||||
|
||||
virtual bool hasRange (uint32 uMin, uint32 uMax) = 0;
|
||||
|
||||
virtual IPAddress getRemoteAddress() const = 0;
|
||||
virtual IP::Endpoint getRemoteAddress() const = 0;
|
||||
|
||||
virtual NativeSocketType& getNativeSocket () = 0;
|
||||
};
|
||||
|
||||
@@ -73,6 +73,9 @@ public:
|
||||
//
|
||||
void handleTimer (boost::system::error_code ec)
|
||||
{
|
||||
if (ec == boost::asio::error::operation_aborted || isStopping ())
|
||||
return;
|
||||
|
||||
async_accept ();
|
||||
}
|
||||
|
||||
@@ -81,6 +84,9 @@ public:
|
||||
void handleAccept (boost::system::error_code ec,
|
||||
boost::shared_ptr <NativeSocketType> const& socket)
|
||||
{
|
||||
if (ec == boost::asio::error::operation_aborted || isStopping ())
|
||||
return;
|
||||
|
||||
bool delay = false;
|
||||
|
||||
if (! ec)
|
||||
|
||||
@@ -17,9 +17,10 @@
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
namespace ripple {
|
||||
#ifndef RIPPLE_OVERLAY_PEERIMP_H_INCLUDED
|
||||
#define RIPPLE_OVERLAY_PEERIMP_H_INCLUDED
|
||||
|
||||
SETUP_LOG (Peer)
|
||||
namespace ripple {
|
||||
|
||||
class PeerImp;
|
||||
|
||||
@@ -60,6 +61,7 @@ struct get_usable_peers
|
||||
class PeerImp
|
||||
: public Peer
|
||||
, public CountedObject <PeerImp>
|
||||
, public boost::enable_shared_from_this <PeerImp>
|
||||
{
|
||||
private:
|
||||
/** Time alloted for a peer to send a HELLO message (DEPRECATED) */
|
||||
@@ -72,6 +74,8 @@ private:
|
||||
static const size_t sslMinimumFinishedLength = 12;
|
||||
|
||||
public:
|
||||
typedef boost::shared_ptr <PeerImp> ptr;
|
||||
|
||||
boost::shared_ptr <NativeSocketType> m_shared_socket;
|
||||
|
||||
Journal m_journal;
|
||||
@@ -84,7 +88,7 @@ public:
|
||||
// Updated at each stage of the connection process to reflect
|
||||
// the current conditions as closely as possible. This includes
|
||||
// the case where we learn the true IP via a PROXY handshake.
|
||||
IPAddress m_remoteAddress;
|
||||
IP::Endpoint m_remoteAddress;
|
||||
|
||||
// These is up here to prevent warnings about order of initializations
|
||||
//
|
||||
@@ -119,7 +123,7 @@ public:
|
||||
std::list<uint256> m_recentTxSets;
|
||||
mutable boost::mutex m_recentLock;
|
||||
|
||||
boost::asio::deadline_timer mActivityTimer;
|
||||
boost::asio::deadline_timer m_timer;
|
||||
|
||||
std::vector<uint8_t> m_readBuffer;
|
||||
std::list<PackedMessage::pointer> mSendQ;
|
||||
@@ -129,6 +133,9 @@ public:
|
||||
|
||||
Resource::Consumer m_usage;
|
||||
|
||||
// The slot assigned to us by PeerFinder
|
||||
PeerFinder::Slot::ptr m_slot;
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
/** New incoming peer from the specified socket */
|
||||
PeerImp (
|
||||
@@ -136,6 +143,7 @@ public:
|
||||
Peers& peers,
|
||||
Resource::Manager& resourceManager,
|
||||
PeerFinder::Manager& peerFinder,
|
||||
PeerFinder::Slot::ptr const& slot,
|
||||
boost::asio::ssl::context& ssl_context,
|
||||
MultiSocket::Flag flags)
|
||||
: m_shared_socket (socket)
|
||||
@@ -153,9 +161,9 @@ public:
|
||||
, m_clusterNode (false)
|
||||
, m_minLedger (0)
|
||||
, m_maxLedger (0)
|
||||
, mActivityTimer (socket->get_io_service())
|
||||
, m_timer (socket->get_io_service())
|
||||
, m_slot (slot)
|
||||
{
|
||||
m_peers.peerCreated (this);
|
||||
}
|
||||
|
||||
/** New outgoing peer
|
||||
@@ -169,6 +177,7 @@ public:
|
||||
Peers& peers,
|
||||
Resource::Manager& resourceManager,
|
||||
PeerFinder::Manager& peerFinder,
|
||||
PeerFinder::Slot::ptr const& slot,
|
||||
boost::asio::ssl::context& ssl_context,
|
||||
MultiSocket::Flag flags)
|
||||
: m_journal (LogPartition::getJournal <Peer> ())
|
||||
@@ -185,14 +194,14 @@ public:
|
||||
, m_clusterNode (false)
|
||||
, m_minLedger (0)
|
||||
, m_maxLedger (0)
|
||||
, mActivityTimer (io_service)
|
||||
, m_timer (io_service)
|
||||
, m_slot (slot)
|
||||
{
|
||||
m_peers.peerCreated (this);
|
||||
}
|
||||
|
||||
virtual ~PeerImp ()
|
||||
{
|
||||
m_peers.peerDestroyed (this);
|
||||
m_peers.remove (m_slot);
|
||||
}
|
||||
|
||||
NativeSocketType& getNativeSocket ()
|
||||
@@ -234,38 +243,94 @@ public:
|
||||
object and begins the process of connection establishment instead
|
||||
of requiring the caller to construct a Peer and call connect.
|
||||
*/
|
||||
void connect (IPAddress const& address)
|
||||
void connect (IP::Endpoint const& address)
|
||||
{
|
||||
m_remoteAddress = address;
|
||||
m_peers.addPeer (shared_from_this ());
|
||||
|
||||
m_journal.info << "Connecting to " << m_remoteAddress;
|
||||
|
||||
boost::system::error_code err;
|
||||
|
||||
mActivityTimer.expires_from_now (nodeVerifySeconds, err);
|
||||
m_timer.expires_from_now (nodeVerifySeconds, err);
|
||||
|
||||
mActivityTimer.async_wait (m_strand.wrap (boost::bind (
|
||||
&PeerImp::handleVerifyTimer,
|
||||
boost::static_pointer_cast <PeerImp> (shared_from_this ()),
|
||||
boost::asio::placeholders::error)));
|
||||
m_timer.async_wait (m_strand.wrap (boost::bind (&PeerImp::handleVerifyTimer,
|
||||
shared_from_this (), boost::asio::placeholders::error)));
|
||||
|
||||
if (err)
|
||||
{
|
||||
m_journal.error << "Failed to set verify timer.";
|
||||
detach ("c2", false);
|
||||
detach ("c2");
|
||||
return;
|
||||
}
|
||||
|
||||
m_peerFinder.onPeerConnect (m_remoteAddress);
|
||||
|
||||
getNativeSocket ().async_connect (
|
||||
IPAddressConversion::to_asio_endpoint (address),
|
||||
m_strand.wrap (boost::bind (
|
||||
&PeerImp::onConnect,
|
||||
m_strand.wrap (boost::bind (&PeerImp::onConnect,
|
||||
shared_from_this (), boost::asio::placeholders::error)));
|
||||
}
|
||||
|
||||
/** Disconnect a peer
|
||||
|
||||
The peer transitions from its current state into `stateGracefulClose`
|
||||
|
||||
@param rsn a code indicating why the peer was disconnected
|
||||
@param onIOStrand true if called on an I/O strand. It if is not, then
|
||||
a callback will be queued up.
|
||||
*/
|
||||
void detach (const char* rsn, bool graceful = true)
|
||||
{
|
||||
if (! m_strand.running_in_this_thread ())
|
||||
{
|
||||
m_strand.post (BIND_TYPE (&PeerImp::detach,
|
||||
shared_from_this (), rsn, graceful));
|
||||
return;
|
||||
}
|
||||
|
||||
if (!m_detaching)
|
||||
{
|
||||
// NIKB TODO No - a race is NOT ok. This needs to be fixed
|
||||
// to have PeerFinder work reliably.
|
||||
m_detaching = true; // Race is ok.
|
||||
|
||||
m_peerFinder.on_closed (m_slot);
|
||||
|
||||
if (m_state == stateActive)
|
||||
m_peers.onPeerDisconnect (shared_from_this ());
|
||||
|
||||
m_state = stateGracefulClose;
|
||||
|
||||
if (m_clusterNode && m_journal.active(Journal::Severity::kWarning))
|
||||
m_journal.warning << "Cluster peer " << m_nodeName <<
|
||||
" detached: " << rsn;
|
||||
|
||||
mSendQ.clear ();
|
||||
|
||||
(void) m_timer.cancel ();
|
||||
|
||||
if (graceful)
|
||||
{
|
||||
m_socket->async_shutdown (
|
||||
m_strand.wrap ( boost::bind(
|
||||
&PeerImp::handleShutdown,
|
||||
boost::static_pointer_cast <PeerImp> (shared_from_this ()),
|
||||
boost::asio::placeholders::error)));
|
||||
}
|
||||
else
|
||||
{
|
||||
m_socket->cancel ();
|
||||
}
|
||||
|
||||
// VFALCO TODO Stop doing this.
|
||||
if (m_nodePublicKey.isValid ())
|
||||
m_nodePublicKey.clear (); // Be idempotent.
|
||||
}
|
||||
}
|
||||
|
||||
/** Close the connection. */
|
||||
void close (bool graceful)
|
||||
{
|
||||
detach ("stop", graceful);
|
||||
}
|
||||
|
||||
/** Outbound connection attempt has completed (not necessarily successfully)
|
||||
|
||||
@@ -280,24 +345,33 @@ public:
|
||||
|
||||
@param ec indicates success or an error code.
|
||||
*/
|
||||
void onConnect (boost::system::error_code const& ec)
|
||||
void onConnect (boost::system::error_code ec)
|
||||
{
|
||||
if (m_detaching)
|
||||
return;
|
||||
|
||||
NativeSocketType::endpoint_type local_endpoint;
|
||||
|
||||
if (! ec)
|
||||
local_endpoint = m_socket->this_layer <
|
||||
NativeSocketType> ().local_endpoint (ec);
|
||||
|
||||
if (ec)
|
||||
{
|
||||
// VFALCO NOTE This log statement looks like ass
|
||||
m_journal.info << "Connecting to " << m_remoteAddress <<
|
||||
" failed " << ec.message();
|
||||
|
||||
m_journal.info <<
|
||||
"Connect to " << m_remoteAddress <<
|
||||
" failed: " << ec.message();
|
||||
// This should end up calling onPeerClosed()
|
||||
detach ("hc", true);
|
||||
detach ("hc");
|
||||
return;
|
||||
}
|
||||
|
||||
bassert (m_state == stateConnecting);
|
||||
m_state = stateConnected;
|
||||
|
||||
m_peerFinder.onPeerConnected (m_socket->local_endpoint(),
|
||||
m_remoteAddress);
|
||||
m_peerFinder.on_connected (m_slot,
|
||||
IPAddressConversion::from_asio (local_endpoint));
|
||||
|
||||
m_socket->set_verify_mode (boost::asio::ssl::verify_none);
|
||||
m_socket->async_handshake (
|
||||
@@ -320,10 +394,6 @@ public:
|
||||
|
||||
m_journal.info << "Accepted " << m_remoteAddress;
|
||||
|
||||
m_peers.addPeer (shared_from_this ());
|
||||
|
||||
m_peerFinder.onPeerAccept (m_socket->local_endpoint(), m_remoteAddress);
|
||||
|
||||
m_socket->set_verify_mode (boost::asio::ssl::verify_none);
|
||||
m_socket->async_handshake (
|
||||
boost::asio::ssl::stream_base::server,
|
||||
@@ -353,56 +423,6 @@ public:
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
/** Disconnect a peer
|
||||
|
||||
The peer transitions from its current state into `stateGracefulClose`
|
||||
|
||||
@param rsn a code indicating why the peer was disconnected
|
||||
@param onIOStrand true if called on an I/O strand. It if is not, then
|
||||
a callback will be queued up.
|
||||
*/
|
||||
void detach (const char* rsn, bool onIOStrand)
|
||||
{
|
||||
// VFALCO NOTE So essentially, detach() is really two different functions
|
||||
// depending on the value of onIOStrand.
|
||||
// TODO Clean this up.
|
||||
//
|
||||
if (!onIOStrand)
|
||||
{
|
||||
m_strand.post (BIND_TYPE (&Peer::detach, shared_from_this (), rsn, true));
|
||||
return;
|
||||
}
|
||||
|
||||
if (!m_detaching)
|
||||
{
|
||||
// NIKB TODO No - a race is NOT ok. This needs to be fixed
|
||||
// to have PeerFinder work reliably.
|
||||
m_detaching = true; // Race is ok.
|
||||
|
||||
m_peerFinder.onPeerClosed (m_remoteAddress);
|
||||
|
||||
if (m_state == stateActive)
|
||||
m_peers.onPeerDisconnect (shared_from_this ());
|
||||
|
||||
m_state = stateGracefulClose;
|
||||
|
||||
if (m_clusterNode && m_journal.active(Journal::Severity::kWarning))
|
||||
m_journal.warning << "Cluster peer " << m_nodeName <<
|
||||
" detached: " << rsn;
|
||||
|
||||
mSendQ.clear ();
|
||||
|
||||
(void) mActivityTimer.cancel ();
|
||||
m_socket->async_shutdown (
|
||||
m_strand.wrap ( boost::bind(
|
||||
&PeerImp::handleShutdown,
|
||||
boost::static_pointer_cast <PeerImp> (shared_from_this ()),
|
||||
boost::asio::placeholders::error)));
|
||||
|
||||
if (m_nodePublicKey.isValid ())
|
||||
m_nodePublicKey.clear (); // Be idempotent.
|
||||
}
|
||||
}
|
||||
|
||||
void sendPacket (const PackedMessage::pointer& packet, bool onStrand)
|
||||
{
|
||||
@@ -442,7 +462,7 @@ public:
|
||||
void charge (Resource::Charge const& fee)
|
||||
{
|
||||
if ((m_usage.charge (fee) == Resource::drop) && m_usage.disconnect ())
|
||||
detach ("resource", false);
|
||||
detach ("resource");
|
||||
}
|
||||
|
||||
Json::Value json ()
|
||||
@@ -606,7 +626,7 @@ public:
|
||||
return (uMin >= m_minLedger) && (uMax <= m_maxLedger);
|
||||
}
|
||||
|
||||
IPAddress getRemoteAddress() const
|
||||
IP::Endpoint getRemoteAddress() const
|
||||
{
|
||||
return m_remoteAddress;
|
||||
}
|
||||
@@ -614,25 +634,25 @@ public:
|
||||
private:
|
||||
void handleShutdown (boost::system::error_code const& ec)
|
||||
{
|
||||
if (ec == boost::asio::error::operation_aborted)
|
||||
if (m_detaching)
|
||||
return;
|
||||
|
||||
if (m_detaching)
|
||||
{
|
||||
m_peers.removePeer (shared_from_this());
|
||||
if (ec == boost::asio::error::operation_aborted)
|
||||
return;
|
||||
}
|
||||
|
||||
if (ec)
|
||||
{
|
||||
m_journal.info << "Shutdown: " << ec.message ();
|
||||
detach ("hsd", true);
|
||||
detach ("hsd");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
void handleWrite (boost::system::error_code const& ec, size_t bytes)
|
||||
{
|
||||
if (m_detaching)
|
||||
return;
|
||||
|
||||
// Call on IO strand
|
||||
|
||||
mSendingPacket.reset ();
|
||||
@@ -646,7 +666,7 @@ private:
|
||||
if (ec)
|
||||
{
|
||||
m_journal.info << "Write: " << ec.message ();
|
||||
detach ("hw", true);
|
||||
detach ("hw");
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -665,16 +685,16 @@ private:
|
||||
void handleReadHeader (boost::system::error_code const& ec,
|
||||
std::size_t bytes)
|
||||
{
|
||||
if (ec == boost::asio::error::operation_aborted)
|
||||
if (m_detaching)
|
||||
return;
|
||||
|
||||
if (m_detaching)
|
||||
if (ec == boost::asio::error::operation_aborted)
|
||||
return;
|
||||
|
||||
if (ec)
|
||||
{
|
||||
m_journal.info << "ReadHeader: " << ec.message ();
|
||||
detach ("hrh1", true);
|
||||
detach ("hrh1");
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -683,7 +703,7 @@ private:
|
||||
// WRITEME: Compare to maximum message length, abort if too large
|
||||
if ((msg_len > (32 * 1024 * 1024)) || (msg_len == 0))
|
||||
{
|
||||
detach ("hrh2", true);
|
||||
detach ("hrh2");
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -693,10 +713,10 @@ private:
|
||||
void handleReadBody (boost::system::error_code const& ec,
|
||||
std::size_t bytes)
|
||||
{
|
||||
if (ec == boost::asio::error::operation_aborted)
|
||||
if (m_detaching)
|
||||
return;
|
||||
|
||||
if (m_detaching)
|
||||
if (ec == boost::asio::error::operation_aborted)
|
||||
return;
|
||||
|
||||
if (ec)
|
||||
@@ -705,7 +725,7 @@ private:
|
||||
|
||||
{
|
||||
Application::ScopedLockType lock (getApp ().getMasterLock (), __FILE__, __LINE__);
|
||||
detach ("hrb", true);
|
||||
detach ("hrb");
|
||||
}
|
||||
|
||||
return;
|
||||
@@ -722,16 +742,16 @@ private:
|
||||
// is in progress.
|
||||
void handleStart (boost::system::error_code const& ec)
|
||||
{
|
||||
if (ec == boost::asio::error::operation_aborted)
|
||||
if (m_detaching)
|
||||
return;
|
||||
|
||||
if (m_detaching)
|
||||
if (ec == boost::asio::error::operation_aborted)
|
||||
return;
|
||||
|
||||
if (ec)
|
||||
{
|
||||
m_journal.info << "Handshake: " << ec.message ();
|
||||
detach ("hs", true);
|
||||
detach ("hs");
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -744,14 +764,14 @@ private:
|
||||
|
||||
if (m_usage.disconnect ())
|
||||
{
|
||||
detach ("resource", true);
|
||||
detach ("resource");
|
||||
return;
|
||||
}
|
||||
|
||||
if(!sendHello ())
|
||||
{
|
||||
m_journal.error << "Unable to send HELLO to " << m_remoteAddress;
|
||||
detach ("hello", true);
|
||||
detach ("hello");
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -760,6 +780,9 @@ private:
|
||||
|
||||
void handleVerifyTimer (boost::system::error_code const& ec)
|
||||
{
|
||||
if (m_detaching)
|
||||
return;
|
||||
|
||||
if (ec == boost::asio::error::operation_aborted)
|
||||
{
|
||||
// Timer canceled because deadline no longer needed.
|
||||
@@ -772,7 +795,7 @@ private:
|
||||
{
|
||||
// m_journal.info << "Verify: Peer failed to verify in time.";
|
||||
|
||||
detach ("hvt", true);
|
||||
detach ("hvt");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -795,14 +818,14 @@ private:
|
||||
if ((m_state == stateHandshaked) && (type == protocol::mtHELLO))
|
||||
{
|
||||
m_journal.warning << "Protocol: HELLO expected!";
|
||||
detach ("prb-hello-expected", true);
|
||||
detach ("prb-hello-expected");
|
||||
return;
|
||||
}
|
||||
|
||||
if ((m_state == stateActive) && (type == protocol::mtHELLO))
|
||||
{
|
||||
m_journal.warning << "Protocol: HELLO unexpected!";
|
||||
detach ("prb-hello-unexpected", true);
|
||||
detach ("prb-hello-unexpected");
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -1307,7 +1330,7 @@ private:
|
||||
{
|
||||
bool bDetach = true;
|
||||
|
||||
(void) mActivityTimer.cancel ();
|
||||
(void) m_timer.cancel ();
|
||||
|
||||
uint32 const ourTime (getApp().getOPs ().getNetworkTimeNC ());
|
||||
uint32 const minTime (ourTime - clockToleranceDeltaSeconds);
|
||||
@@ -1382,8 +1405,8 @@ private:
|
||||
bassert (m_state == stateConnected);
|
||||
m_state = stateHandshaked;
|
||||
|
||||
m_peerFinder.onPeerHandshake (m_remoteAddress,
|
||||
RipplePublicKey(m_nodePublicKey), m_clusterNode);
|
||||
m_peerFinder.on_handshake (m_slot, RipplePublicKey(m_nodePublicKey),
|
||||
m_clusterNode);
|
||||
|
||||
// XXX Set timer: connection is in grace period to be useful.
|
||||
// XXX Set timer: connection idle (idle may vary depending on connection type.)
|
||||
@@ -1408,7 +1431,7 @@ private:
|
||||
if (bDetach)
|
||||
{
|
||||
m_nodePublicKey.clear ();
|
||||
detach ("recvh", true);
|
||||
detach ("recvh");
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -1448,9 +1471,9 @@ private:
|
||||
{
|
||||
protocol::TMLoadSource const& node = packet.loadsources (i);
|
||||
Resource::Gossip::Item item;
|
||||
item.address = IPAddress::from_string (node.name());
|
||||
item.address = IP::Endpoint::from_string (node.name());
|
||||
item.balance = node.cost();
|
||||
if (item.address != IPAddress())
|
||||
if (item.address != IP::Endpoint())
|
||||
gossip.items.push_back(item);
|
||||
}
|
||||
m_resourceManager.importConsumers (m_nodeName, gossip);
|
||||
@@ -1608,7 +1631,7 @@ private:
|
||||
// TODO: filter out all the LAN peers
|
||||
void recvPeers (protocol::TMPeers& packet)
|
||||
{
|
||||
std::vector <IPAddress> list;
|
||||
std::vector <IP::Endpoint> list;
|
||||
list.reserve (packet.nodes().size());
|
||||
for (int i = 0; i < packet.nodes ().size (); ++i)
|
||||
{
|
||||
@@ -1618,13 +1641,13 @@ private:
|
||||
|
||||
{
|
||||
IP::AddressV4 v4 (ntohl (addr.s_addr));
|
||||
IPAddress address (v4, packet.nodes (i).ipv4port ());
|
||||
IP::Endpoint address (v4, packet.nodes (i).ipv4port ());
|
||||
list.push_back (address);
|
||||
}
|
||||
}
|
||||
|
||||
if (! list.empty())
|
||||
m_peerFinder.onLegacyEndpoints (list);
|
||||
m_peerFinder.on_legacy_endpoints (list);
|
||||
}
|
||||
|
||||
void recvEndpoints (protocol::TMEndpoints& packet)
|
||||
@@ -1647,13 +1670,13 @@ private:
|
||||
in_addr addr;
|
||||
addr.s_addr = tm.ipv4().ipv4();
|
||||
IP::AddressV4 v4 (ntohl (addr.s_addr));
|
||||
endpoint.address = IPAddress (v4, tm.ipv4().ipv4port ());
|
||||
endpoint.address = IP::Endpoint (v4, tm.ipv4().ipv4port ());
|
||||
}
|
||||
else
|
||||
{
|
||||
// This Endpoint describes the peer we are connected to.
|
||||
// We will take the remote address seen on the socket and
|
||||
// store that in the IPAddress. If this is the first time,
|
||||
// store that in the IP::Endpoint. If this is the first time,
|
||||
// then we'll verify that their listener can receive incoming
|
||||
// by performing a connectivity test.
|
||||
//
|
||||
@@ -1665,7 +1688,7 @@ private:
|
||||
}
|
||||
|
||||
if (! endpoints.empty())
|
||||
m_peerFinder.onPeerEndpoints (m_remoteAddress, endpoints);
|
||||
m_peerFinder.on_endpoints (m_slot, endpoints);
|
||||
}
|
||||
|
||||
void recvGetObjectByHash (const boost::shared_ptr<protocol::TMGetObjectByHash>& ptr)
|
||||
@@ -2714,58 +2737,6 @@ void Peer::charge (boost::weak_ptr <Peer>& peer, Resource::Charge const& fee)
|
||||
p->charge (fee);
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
void Peer::accept (
|
||||
boost::shared_ptr <NativeSocketType> const& socket,
|
||||
Peers& peers,
|
||||
Resource::Manager& resourceManager,
|
||||
PeerFinder::Manager& peerFinder,
|
||||
boost::asio::ssl::context& ssl_context,
|
||||
bool proxyHandshake)
|
||||
{
|
||||
MultiSocket::Flag flags (
|
||||
MultiSocket::Flag::server_role | MultiSocket::Flag::ssl_required);
|
||||
|
||||
if (proxyHandshake)
|
||||
flags = flags.with (MultiSocket::Flag::proxy);
|
||||
|
||||
boost::shared_ptr<PeerImp> peer (boost::make_shared <PeerImp> (
|
||||
socket,
|
||||
peers,
|
||||
resourceManager,
|
||||
peerFinder,
|
||||
ssl_context,
|
||||
flags));
|
||||
|
||||
peer->accept ();
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
void Peer::connect (
|
||||
IP::Endpoint const& address,
|
||||
boost::asio::io_service& io_service,
|
||||
Peers& peers,
|
||||
Resource::Manager& resourceManager,
|
||||
PeerFinder::Manager& peerFinder,
|
||||
boost::asio::ssl::context& ssl_context)
|
||||
{
|
||||
MultiSocket::Flag flags (
|
||||
MultiSocket::Flag::client_role | MultiSocket::Flag::ssl);
|
||||
|
||||
boost::shared_ptr<PeerImp> peer (boost::make_shared <PeerImp> (
|
||||
io_service,
|
||||
peers,
|
||||
resourceManager,
|
||||
peerFinder,
|
||||
ssl_context,
|
||||
flags));
|
||||
|
||||
peer->connect (address);
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
const boost::posix_time::seconds PeerImp::nodeVerifySeconds (15);
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
@@ -2825,3 +2796,5 @@ std::ostream& operator<< (std::ostream& os, Peer const* peer)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -18,12 +18,16 @@
|
||||
//==============================================================================
|
||||
|
||||
#include "PeerDoor.h"
|
||||
#include "PeerImp.h"
|
||||
|
||||
#include <boost/config.hpp>
|
||||
#include <condition_variable>
|
||||
#include <mutex>
|
||||
|
||||
namespace ripple {
|
||||
|
||||
SETUP_LOG (Peer)
|
||||
|
||||
class PeersLog;
|
||||
template <> char const* LogPartition::getPartitionName <PeersLog> () { return "Peers"; }
|
||||
|
||||
@@ -80,7 +84,10 @@ class PeersImp
|
||||
, public LeakChecked <PeersImp>
|
||||
{
|
||||
public:
|
||||
typedef boost::unordered_map <IPAddress, Peer::pointer> PeerByIP;
|
||||
typedef std::unordered_map <PeerFinder::Slot::ptr,
|
||||
boost::weak_ptr <PeerImp>> PeersBySlot;
|
||||
typedef std::unordered_map <IP::Endpoint,
|
||||
boost::weak_ptr <PeerImp>> PeersByIP;
|
||||
|
||||
typedef boost::unordered_map <
|
||||
RippleAddress, Peer::pointer> PeerByPublicKey;
|
||||
@@ -104,8 +111,8 @@ public:
|
||||
boost::asio::io_service& m_io_service;
|
||||
boost::asio::ssl::context& m_ssl_context;
|
||||
|
||||
/** Tracks peers by their IP address and port */
|
||||
PeerByIP m_ipMap;
|
||||
/** Associates slots to peers. */
|
||||
PeersBySlot m_peers;
|
||||
|
||||
/** Tracks peers by their public key */
|
||||
PeerByPublicKey m_publicKeyMap;
|
||||
@@ -113,9 +120,6 @@ public:
|
||||
/** Tracks peers by their session ID */
|
||||
PeerByShortId m_shortIdMap;
|
||||
|
||||
/** Tracks all instances of peer objects */
|
||||
List <Peer> m_list;
|
||||
|
||||
/** The peer door for regular SSL connections */
|
||||
std::unique_ptr <PeerDoor> m_doorDirect;
|
||||
|
||||
@@ -148,12 +152,12 @@ public:
|
||||
*this,
|
||||
siteFiles,
|
||||
*this,
|
||||
get_seconds_clock (),
|
||||
LogPartition::getJournal <PeerFinderLog> ())))
|
||||
, m_io_service (io_service)
|
||||
, m_ssl_context (ssl_context)
|
||||
, m_resolver (resolver)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
~PeersImp ()
|
||||
@@ -162,38 +166,86 @@ public:
|
||||
// This is just to catch improper use of the Stoppable API.
|
||||
//
|
||||
std::unique_lock <decltype(m_mutex)> lock (m_mutex);
|
||||
#ifdef BOOST_NO_CXX11_LAMBDAS
|
||||
while (m_child_count != 0)
|
||||
m_cond.wait (lock);
|
||||
#else
|
||||
m_cond.wait (lock, [this] {
|
||||
return this->m_child_count == 0; });
|
||||
#endif
|
||||
|
||||
}
|
||||
|
||||
void accept (
|
||||
bool proxyHandshake,
|
||||
boost::shared_ptr <NativeSocketType> const& socket)
|
||||
{
|
||||
Peer::accept (
|
||||
socket,
|
||||
*this,
|
||||
m_resourceManager,
|
||||
*m_peerFinder,
|
||||
m_ssl_context,
|
||||
proxyHandshake);
|
||||
// An error getting an endpoint means the connection closed.
|
||||
// Just do nothing and the socket will be closed by the caller.
|
||||
boost::system::error_code ec;
|
||||
auto const local_endpoint_native (socket->local_endpoint (ec));
|
||||
if (ec)
|
||||
return;
|
||||
auto const remote_endpoint_native (socket->remote_endpoint (ec));
|
||||
if (ec)
|
||||
return;
|
||||
|
||||
auto const local_endpoint (
|
||||
IPAddressConversion::from_asio (local_endpoint_native));
|
||||
auto const remote_endpoint (
|
||||
IPAddressConversion::from_asio (remote_endpoint_native));
|
||||
|
||||
PeerFinder::Slot::ptr const slot (m_peerFinder->new_inbound_slot (
|
||||
local_endpoint, remote_endpoint));
|
||||
|
||||
if (slot == nullptr)
|
||||
return;
|
||||
|
||||
MultiSocket::Flag flags (
|
||||
MultiSocket::Flag::server_role | MultiSocket::Flag::ssl_required);
|
||||
|
||||
if (proxyHandshake)
|
||||
flags = flags.with (MultiSocket::Flag::proxy);
|
||||
|
||||
PeerImp::ptr const peer (boost::make_shared <PeerImp> (
|
||||
socket, *this, m_resourceManager, *m_peerFinder,
|
||||
slot, m_ssl_context, flags));
|
||||
|
||||
{
|
||||
std::lock_guard <decltype(m_mutex)> lock (m_mutex);
|
||||
{
|
||||
std::pair <PeersBySlot::iterator, bool> const result (
|
||||
m_peers.emplace (slot, peer));
|
||||
assert (result.second);
|
||||
}
|
||||
++m_child_count;
|
||||
}
|
||||
|
||||
void connect (IP::Endpoint const& address)
|
||||
// VFALCO NOTE Why not do this in the ctor?
|
||||
peer->accept ();
|
||||
}
|
||||
|
||||
void connect (IP::Endpoint const& remote_endpoint)
|
||||
{
|
||||
Peer::connect (
|
||||
address,
|
||||
m_io_service,
|
||||
*this,
|
||||
m_resourceManager,
|
||||
*m_peerFinder,
|
||||
m_ssl_context);
|
||||
PeerFinder::Slot::ptr const slot (
|
||||
m_peerFinder->new_outbound_slot (remote_endpoint));
|
||||
|
||||
if (slot == nullptr)
|
||||
return;
|
||||
|
||||
MultiSocket::Flag const flags (
|
||||
MultiSocket::Flag::client_role | MultiSocket::Flag::ssl);
|
||||
|
||||
PeerImp::ptr const peer (boost::make_shared <PeerImp> (
|
||||
m_io_service, *this, m_resourceManager, *m_peerFinder,
|
||||
slot, m_ssl_context, flags));
|
||||
|
||||
{
|
||||
std::lock_guard <decltype(m_mutex)> lock (m_mutex);
|
||||
{
|
||||
std::pair <PeersBySlot::iterator, bool> const result (
|
||||
m_peers.emplace (slot, peer));
|
||||
assert (result.second);
|
||||
}
|
||||
++m_child_count;
|
||||
}
|
||||
|
||||
// VFALCO NOTE Why not do this in the ctor?
|
||||
peer->connect (remote_endpoint);
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
@@ -207,17 +259,12 @@ public:
|
||||
if (areChildrenStopped () && m_child_count == 0)
|
||||
{
|
||||
m_cond.notify_all ();
|
||||
m_journal.info <<
|
||||
"Stopped.";
|
||||
stopped ();
|
||||
}
|
||||
}
|
||||
|
||||
// Increment the count of dependent objects
|
||||
// Caller must hold the mutex
|
||||
void addref ()
|
||||
{
|
||||
++m_child_count;
|
||||
}
|
||||
|
||||
// Decrement the count of dependent objects
|
||||
// Caller must hold the mutex
|
||||
void release ()
|
||||
@@ -226,17 +273,14 @@ public:
|
||||
check_stopped ();
|
||||
}
|
||||
|
||||
void peerCreated (Peer* peer)
|
||||
void remove (PeerFinder::Slot::ptr const& slot)
|
||||
{
|
||||
std::lock_guard <decltype(m_mutex)> lock (m_mutex);
|
||||
m_list.push_back (*peer);
|
||||
addref();
|
||||
}
|
||||
|
||||
void peerDestroyed (Peer* peer)
|
||||
{
|
||||
std::lock_guard <decltype(m_mutex)> lock (m_mutex);
|
||||
m_list.erase (m_list.iterator_to (*peer));
|
||||
PeersBySlot::iterator const iter (m_peers.find (slot));
|
||||
assert (iter != m_peers.end ());
|
||||
m_peers.erase (iter);
|
||||
|
||||
release();
|
||||
}
|
||||
|
||||
@@ -246,41 +290,28 @@ public:
|
||||
//
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
void connectPeers (std::vector <IPAddress> const& list)
|
||||
void connect (std::vector <IP::Endpoint> const& list)
|
||||
{
|
||||
for (std::vector <IPAddress>::const_iterator iter (list.begin());
|
||||
for (std::vector <IP::Endpoint>::const_iterator iter (list.begin());
|
||||
iter != list.end(); ++iter)
|
||||
connect (*iter);
|
||||
}
|
||||
|
||||
void disconnectPeer (IPAddress const& address, bool graceful)
|
||||
void activate (PeerFinder::Slot::ptr const& slot)
|
||||
{
|
||||
m_journal.trace <<
|
||||
"disconnectPeer (" << address <<
|
||||
", " << graceful << ")";
|
||||
"Activate " << slot->remote_endpoint();
|
||||
|
||||
std::lock_guard <decltype(m_mutex)> lock (m_mutex);
|
||||
|
||||
PeerByIP::iterator const it (m_ipMap.find (address));
|
||||
|
||||
if (it != m_ipMap.end ())
|
||||
it->second->detach ("disc", false);
|
||||
PeersBySlot::iterator const iter (m_peers.find (slot));
|
||||
assert (iter != m_peers.end ());
|
||||
PeerImp::ptr const peer (iter->second.lock());
|
||||
assert (peer != nullptr);
|
||||
peer->activate ();
|
||||
}
|
||||
|
||||
void activatePeer (IPAddress const& remote_address)
|
||||
{
|
||||
m_journal.trace <<
|
||||
"activatePeer (" << remote_address << ")";
|
||||
|
||||
std::lock_guard <decltype(m_mutex)> lock (m_mutex);
|
||||
|
||||
PeerByIP::iterator const it (m_ipMap.find (remote_address));
|
||||
|
||||
if (it != m_ipMap.end ())
|
||||
it->second->activate();
|
||||
}
|
||||
|
||||
void sendEndpoints (IPAddress const& remote_address,
|
||||
void send (PeerFinder::Slot::ptr const& slot,
|
||||
std::vector <PeerFinder::Endpoint> const& endpoints)
|
||||
{
|
||||
bassert (! endpoints.empty());
|
||||
@@ -309,16 +340,33 @@ public:
|
||||
|
||||
{
|
||||
std::lock_guard <decltype(m_mutex)> lock (m_mutex);
|
||||
PeerByIP::iterator const iter (m_ipMap.find (remote_address));
|
||||
// Address must exist!
|
||||
check_postcondition (iter != m_ipMap.end());
|
||||
Peer::pointer peer (iter->second);
|
||||
// VFALCO TODO Why are we checking isConnected? That should not be needed
|
||||
PeersBySlot::iterator const iter (m_peers.find (slot));
|
||||
assert (iter != m_peers.end ());
|
||||
PeerImp::ptr const peer (iter->second.lock());
|
||||
assert (peer != nullptr);
|
||||
// VFALCO TODO Why are we checking isConnected?
|
||||
// That should not be needed
|
||||
if (peer->isConnected())
|
||||
peer->sendPacket (msg, false);
|
||||
}
|
||||
}
|
||||
|
||||
void disconnect (PeerFinder::Slot::ptr const& slot, bool graceful)
|
||||
{
|
||||
if (m_journal.trace) m_journal.trace <<
|
||||
"Disconnect " << slot->remote_endpoint () <<
|
||||
(graceful ? "gracefully" : "");
|
||||
|
||||
std::lock_guard <decltype(m_mutex)> lock (m_mutex);
|
||||
|
||||
PeersBySlot::iterator const iter (m_peers.find (slot));
|
||||
assert (iter != m_peers.end ());
|
||||
PeerImp::ptr const peer (iter->second.lock());
|
||||
assert (peer != nullptr);
|
||||
peer->close (graceful);
|
||||
//peer->detach ("disc", false);
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
//
|
||||
// Stoppable
|
||||
@@ -329,6 +377,7 @@ public:
|
||||
{
|
||||
PeerFinder::Config config;
|
||||
|
||||
if (getConfig ().PEERS_MAX != 0)
|
||||
config.maxPeers = getConfig ().PEERS_MAX;
|
||||
|
||||
config.outPeers = config.calcOutPeers();
|
||||
@@ -367,7 +416,7 @@ public:
|
||||
{ }
|
||||
|
||||
void operator()(std::string const& name,
|
||||
std::vector <IPAddress> const& address)
|
||||
std::vector <IP::Endpoint> const& address)
|
||||
{
|
||||
if (!address.empty())
|
||||
m_peerFinder->addFixedPeer (name, address);
|
||||
@@ -404,19 +453,19 @@ public:
|
||||
{
|
||||
}
|
||||
|
||||
// Close all peer connections. If graceful is true then the peer objects
|
||||
// will wait for pending i/o before closing the socket. No new data will
|
||||
// be sent.
|
||||
//
|
||||
// The caller must hold the mutex
|
||||
//
|
||||
// VFALCO TODO implement the graceful flag
|
||||
//
|
||||
/** Close all peer connections.
|
||||
If `graceful` is true then active
|
||||
Requirements:
|
||||
Caller must hold the mutex.
|
||||
*/
|
||||
void close_all (bool graceful)
|
||||
{
|
||||
for (List <Peer>::iterator iter (m_list.begin ());
|
||||
iter != m_list.end(); ++iter)
|
||||
iter->detach ("stop", false);
|
||||
for (auto entry : m_peers)
|
||||
{
|
||||
PeerImp::ptr const peer (entry.second.lock());
|
||||
assert (peer != nullptr);
|
||||
peer->close (graceful);
|
||||
}
|
||||
}
|
||||
|
||||
void onStop ()
|
||||
@@ -424,8 +473,8 @@ public:
|
||||
std::lock_guard <decltype(m_mutex)> lock (m_mutex);
|
||||
// Take off the extra count we added in the constructor
|
||||
release();
|
||||
// Close all peers
|
||||
close_all (true);
|
||||
|
||||
close_all (false);
|
||||
}
|
||||
|
||||
void onChildrenStopped ()
|
||||
@@ -535,33 +584,6 @@ public:
|
||||
iter->second;
|
||||
return Peer::pointer();
|
||||
}
|
||||
|
||||
// TODO NIKB Rename these two functions. It's not immediately clear
|
||||
// what they do: create a tracking entry for a peer by
|
||||
// the peer's remote IP.
|
||||
/** Start tracking a peer */
|
||||
void addPeer (Peer::Ptr const& peer)
|
||||
{
|
||||
std::lock_guard <decltype(m_mutex)> lock (m_mutex);
|
||||
|
||||
check_precondition (! isStopping ());
|
||||
|
||||
m_journal.error << "Adding peer: " << peer->getRemoteAddress();
|
||||
|
||||
std::pair <PeerByIP::iterator, bool> result (m_ipMap.emplace (
|
||||
boost::unordered::piecewise_construct,
|
||||
boost::make_tuple (peer->getRemoteAddress()),
|
||||
boost::make_tuple (peer)));
|
||||
|
||||
check_postcondition (result.second);
|
||||
}
|
||||
|
||||
/** Stop tracking a peer */
|
||||
void removePeer (Peer::Ptr const& peer)
|
||||
{
|
||||
std::lock_guard <decltype(m_mutex)> lock (m_mutex);
|
||||
m_ipMap.erase (peer->getRemoteAddress());
|
||||
}
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
@@ -20,6 +20,9 @@
|
||||
#ifndef RIPPLE_PEERS_H_INCLUDED
|
||||
#define RIPPLE_PEERS_H_INCLUDED
|
||||
|
||||
// VFALCO TODO Remove this include dependency it shouldn't be needed
|
||||
#include "../../ripple/peerfinder/api/Slot.h"
|
||||
|
||||
namespace ripple {
|
||||
|
||||
namespace PeerFinder {
|
||||
@@ -66,14 +69,10 @@ public:
|
||||
|
||||
virtual ~Peers () = 0;
|
||||
|
||||
// NIKB TODO This is an implementation detail - a private
|
||||
// interface between Peers and Peer. It should
|
||||
// be split out and moved elsewhere.
|
||||
//
|
||||
// VFALCO NOTE PeerImp should have visbility to PeersImp
|
||||
//
|
||||
virtual void peerCreated (Peer* peer) = 0;
|
||||
virtual void peerDestroyed (Peer *peer) = 0;
|
||||
// VFALCO NOTE These should be a private API
|
||||
/** @{ */
|
||||
virtual void remove (PeerFinder::Slot::ptr const& slot) = 0;
|
||||
/** @} */
|
||||
|
||||
virtual void accept (bool proxyHandshake,
|
||||
boost::shared_ptr <NativeSocketType> const& socket) = 0;
|
||||
@@ -93,9 +92,6 @@ public:
|
||||
// Peer 64-bit ID function
|
||||
virtual Peer::pointer findPeerByShortID (Peer::ShortId const& id) = 0;
|
||||
|
||||
virtual void addPeer (Peer::Ptr const& peer) = 0;
|
||||
virtual void removePeer (Peer::Ptr const& peer) = 0;
|
||||
|
||||
/** Visit every active peer and return a value
|
||||
The functor must:
|
||||
- Be callable as:
|
||||
|
||||
@@ -46,6 +46,5 @@ namespace ripple {
|
||||
#include "peers/PackedMessage.cpp"
|
||||
}
|
||||
|
||||
#include "peers/Peer.cpp"
|
||||
#include "peers/PeerDoor.cpp"
|
||||
#include "peers/Peers.cpp"
|
||||
|
||||
Reference in New Issue
Block a user