Add support for reserved peer slots:

This commit allows server operators to reserve slots for specific
peers (identified by the peer's public node identity) and to make
changes to the reservations while the server is operating.

This commit closes #2938
This commit is contained in:
John Freeman
2019-05-14 14:50:37 -05:00
committed by Nik Bougalis
parent 20cc5df5fe
commit 87e9ee5ce9
25 changed files with 553 additions and 52 deletions

View File

@@ -2176,6 +2176,7 @@ else ()
src/ripple/overlay/impl/Message.cpp src/ripple/overlay/impl/Message.cpp
src/ripple/overlay/impl/OverlayImpl.cpp src/ripple/overlay/impl/OverlayImpl.cpp
src/ripple/overlay/impl/PeerImp.cpp src/ripple/overlay/impl/PeerImp.cpp
src/ripple/overlay/impl/PeerReservationTable.cpp
src/ripple/overlay/impl/PeerSet.cpp src/ripple/overlay/impl/PeerSet.cpp
src/ripple/overlay/impl/TMHello.cpp src/ripple/overlay/impl/TMHello.cpp
src/ripple/overlay/impl/TrafficCount.cpp src/ripple/overlay/impl/TrafficCount.cpp
@@ -2242,6 +2243,7 @@ else ()
src/ripple/rpc/handlers/Ping.cpp src/ripple/rpc/handlers/Ping.cpp
src/ripple/rpc/handlers/Print.cpp src/ripple/rpc/handlers/Print.cpp
src/ripple/rpc/handlers/Random.cpp src/ripple/rpc/handlers/Random.cpp
src/ripple/rpc/handlers/Reservations.cpp
src/ripple/rpc/handlers/RipplePathFind.cpp src/ripple/rpc/handlers/RipplePathFind.cpp
src/ripple/rpc/handlers/ServerInfo.cpp src/ripple/rpc/handlers/ServerInfo.cpp
src/ripple/rpc/handlers/ServerState.cpp src/ripple/rpc/handlers/ServerState.cpp

View File

@@ -53,6 +53,7 @@
#include <ripple/nodestore/DummyScheduler.h> #include <ripple/nodestore/DummyScheduler.h>
#include <ripple/nodestore/DatabaseShard.h> #include <ripple/nodestore/DatabaseShard.h>
#include <ripple/overlay/Cluster.h> #include <ripple/overlay/Cluster.h>
#include <ripple/overlay/PeerReservationTable.h>
#include <ripple/overlay/make_Overlay.h> #include <ripple/overlay/make_Overlay.h>
#include <ripple/protocol/BuildInfo.h> #include <ripple/protocol/BuildInfo.h>
#include <ripple/protocol/Feature.h> #include <ripple/protocol/Feature.h>
@@ -350,6 +351,7 @@ public:
TaggedCache <uint256, AcceptedLedger> m_acceptedLedgerCache; TaggedCache <uint256, AcceptedLedger> m_acceptedLedgerCache;
std::unique_ptr <NetworkOPs> m_networkOPs; std::unique_ptr <NetworkOPs> m_networkOPs;
std::unique_ptr <Cluster> cluster_; std::unique_ptr <Cluster> cluster_;
std::unique_ptr <PeerReservationTable> peerReservations_;
std::unique_ptr <ManifestCache> validatorManifests_; std::unique_ptr <ManifestCache> validatorManifests_;
std::unique_ptr <ManifestCache> publisherManifests_; std::unique_ptr <ManifestCache> publisherManifests_;
std::unique_ptr <ValidatorList> validators_; std::unique_ptr <ValidatorList> validators_;
@@ -494,6 +496,8 @@ public:
, cluster_ (std::make_unique<Cluster> ( , cluster_ (std::make_unique<Cluster> (
logs_->journal("Overlay"))) logs_->journal("Overlay")))
, peerReservations_(std::make_unique<PeerReservationTable>(logs_->journal("PeerReservationTable")))
, validatorManifests_ (std::make_unique<ManifestCache> ( , validatorManifests_ (std::make_unique<ManifestCache> (
logs_->journal("ManifestCache"))) logs_->journal("ManifestCache")))
@@ -774,6 +778,11 @@ public:
return *cluster_; return *cluster_;
} }
PeerReservationTable& peerReservations () override
{
return *peerReservations_;
}
SHAMapStore& getSHAMapStore () override SHAMapStore& getSHAMapStore () override
{ {
return *m_shaMapStore; return *m_shaMapStore;
@@ -1312,6 +1321,12 @@ bool ApplicationImp::setup()
if (!initSQLiteDBs() || !initNodeStoreDBs()) if (!initSQLiteDBs() || !initNodeStoreDBs())
return false; return false;
if (!peerReservations_->load(getWalletDB()))
{
JLOG(m_journal.fatal()) << "Cannot find peer reservations!";
return false;
}
if (validatorKeys_.publicKey.size()) if (validatorKeys_.publicKey.size())
setMaxDisallowedLedger(); setMaxDisallowedLedger();

View File

@@ -26,6 +26,7 @@
#include <ripple/core/Config.h> #include <ripple/core/Config.h>
#include <ripple/protocol/Protocol.h> #include <ripple/protocol/Protocol.h>
#include <ripple/beast/utility/PropertyStream.h> #include <ripple/beast/utility/PropertyStream.h>
#include <ripple/overlay/PeerReservationTable.h>
#include <boost/asio.hpp> #include <boost/asio.hpp>
#include <memory> #include <memory>
#include <mutex> #include <mutex>
@@ -139,6 +140,7 @@ public:
virtual ManifestCache& validatorManifests () = 0; virtual ManifestCache& validatorManifests () = 0;
virtual ManifestCache& publisherManifests () = 0; virtual ManifestCache& publisherManifests () = 0;
virtual Cluster& cluster () = 0; virtual Cluster& cluster () = 0;
virtual PeerReservationTable& peerReservations () = 0;
virtual RCLValidations& getValidations () = 0; virtual RCLValidations& getValidations () = 0;
virtual NodeStore::Database& getNodeStore () = 0; virtual NodeStore::Database& getNodeStore () = 0;
virtual NodeStore::DatabaseShard* getShardStore() = 0; virtual NodeStore::DatabaseShard* getShardStore() = 0;

View File

@@ -115,6 +115,12 @@ const char* WalletDBInit[] =
PrivateKey CHARACTER(52) \ PrivateKey CHARACTER(52) \
);", );",
// Peer reservations.
"CREATE TABLE IF NOT EXISTS PeerReservations ( \
PublicKey CHARACTER(53) UNIQUE NOT NULL, \
Description CHARACTER(64) NOT NULL \
);",
// Validator Manifests // Validator Manifests
"CREATE TABLE IF NOT EXISTS ValidatorManifests ( \ "CREATE TABLE IF NOT EXISTS ValidatorManifests ( \
RawData BLOB NOT NULL \ RawData BLOB NOT NULL \

View File

@@ -149,6 +149,9 @@ void printHelp (const po::options_description& desc)
" peers\n" " peers\n"
" ping\n" " ping\n"
" random\n" " random\n"
" peer_reservations_add <public_key> [<description>]\n"
" peer_reservations_del <public_key>\n"
" peer_reservations_list\n"
" ripple ...\n" " ripple ...\n"
" ripple_path_find <json> [<ledger>]\n" " ripple_path_find <json> [<ledger>]\n"
" server_info [counters]\n" " server_info [counters]\n"

View File

@@ -51,7 +51,8 @@ DatabaseCon::DatabaseCon (
} }
catch (soci::soci_error&) catch (soci::soci_error&)
{ {
// ignore errors // TODO: We should at least log this error. It is annoying to wire
// a logger into every context, but there are other solutions.
} }
} }
} }

View File

@@ -849,6 +849,26 @@ private:
return jvRequest; return jvRequest;
} }
// peer_reservations_add <public_key> [<name>]
// Build the JSON-RPC request body for `peer_reservations_add` from the
// positional command-line parameters.
Json::Value parsePeerReservationsAdd (Json::Value const& jvParams)
{
    Json::Value jvRequest;
    // Parameter 0 is the node's public key, forwarded verbatim; the
    // server-side handler is responsible for validating the encoding.
    jvRequest[jss::public_key] = jvParams[0u].asString();
    if (jvParams.size() > 1)
    {
        // Optional second parameter: a human-readable description.
        jvRequest[jss::description] = jvParams[1u].asString();
    }
    return jvRequest;
}
// peer_reservations_del <public_key>
// Build the JSON-RPC request body for `peer_reservations_del`: the single
// positional parameter is the node's public key, forwarded verbatim.
Json::Value parsePeerReservationsDel (Json::Value const& jvParams)
{
    Json::Value jvRequest;
    jvRequest[jss::public_key] = jvParams[0u].asString();
    return jvRequest;
}
// ripple_path_find <json> [<ledger>] // ripple_path_find <json> [<ledger>]
Json::Value parseRipplePathFind (Json::Value const& jvParams) Json::Value parseRipplePathFind (Json::Value const& jvParams)
{ {
@@ -1127,6 +1147,9 @@ public:
{ "print", &RPCParser::parseAsIs, 0, 1 }, { "print", &RPCParser::parseAsIs, 0, 1 },
// { "profile", &RPCParser::parseProfile, 1, 9 }, // { "profile", &RPCParser::parseProfile, 1, 9 },
{ "random", &RPCParser::parseAsIs, 0, 0 }, { "random", &RPCParser::parseAsIs, 0, 0 },
{ "peer_reservations_add", &RPCParser::parsePeerReservationsAdd, 1, 2 },
{ "peer_reservations_del", &RPCParser::parsePeerReservationsDel, 1, 1 },
{ "peer_reservations_list", &RPCParser::parseAsIs, 0, 0 },
{ "ripple_path_find", &RPCParser::parseRipplePathFind, 1, 2 }, { "ripple_path_find", &RPCParser::parseRipplePathFind, 1, 2 },
{ "sign", &RPCParser::parseSignSubmit, 2, 3 }, { "sign", &RPCParser::parseSignSubmit, 2, 3 },
{ "sign_for", &RPCParser::parseSignFor, 3, 4 }, { "sign_for", &RPCParser::parseSignFor, 3, 4 },

View File

@@ -0,0 +1,123 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2019 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_OVERLAY_PEER_RESERVATION_TABLE_H_INCLUDED
#define RIPPLE_OVERLAY_PEER_RESERVATION_TABLE_H_INCLUDED
#include <ripple/beast/hash/uhash.h>
#include <ripple/beast/hash/hash_append.h>
#include <ripple/beast/utility/Journal.h>
#include <ripple/json/json_forwards.h>
#include <ripple/protocol/PublicKey.h>
#define SOCI_USE_BOOST
#include <boost/optional.hpp>
#include <soci/soci.h>
#include <mutex>
#include <string>
#include <unordered_set>
#include <vector>
namespace ripple {
class DatabaseCon;
// Value type for reservations.
//
// Identity is determined by `nodeId` alone: both `hash_append` and
// `operator<` deliberately ignore `description`, so two reservations for
// the same node compare equivalent regardless of how they are described.
struct PeerReservation final
{
public:
    PublicKey nodeId;
    std::string description;

    // Serialize this reservation for RPC responses.
    auto
    toJson() const -> Json::Value;

    // Hash on the node identity only, consistent with `KeyEqual` so the
    // reservation set is keyed by node.
    template <typename Hasher>
    friend void hash_append(Hasher& h, PeerReservation const& x) noexcept
    {
        using beast::hash_append;
        hash_append(h, x.nodeId);
    }

    // Order by node identity; used to sort `PeerReservationTable::list()`.
    friend bool operator<(PeerReservation const& a, PeerReservation const& b)
    {
        return a.nodeId < b.nodeId;
    }
};
// TODO: When C++20 arrives, take advantage of "equivalence" instead of
// "equality". Add an overload for `(PublicKey, PeerReservation)`, and just
// pass a `PublicKey` directly to `unordered_set.find`.
// Equality predicate for the reservation set: two reservations are equal
// when they name the same node; the description does not participate.
struct KeyEqual final
{
    bool operator() (
        PeerReservation const& lhs, PeerReservation const& rhs) const
    {
        return lhs.nodeId == rhs.nodeId;
    }
};
class PeerReservationTable final
{
public:
explicit PeerReservationTable(
beast::Journal journal = beast::Journal(beast::Journal::getNullSink()))
: journal_(journal)
{
}
std::vector<PeerReservation> list() const;
bool
contains(PublicKey const& nodeId)
{
std::lock_guard<std::mutex> lock(this->mutex_);
return table_.find({nodeId}) != table_.end();
}
// Because `ApplicationImp` has two-phase initialization, so must we.
// Our dependencies are not prepared until the second phase.
bool
load(DatabaseCon& connection);
/**
* @return the replaced reservation if it existed
* @throw soci::soci_error
*/
auto
insert_or_assign(PeerReservation const& reservation)
-> boost::optional<PeerReservation>;
/**
* @return the erased reservation if it existed
*/
auto
erase(PublicKey const& nodeId) -> boost::optional<PeerReservation>;
private:
beast::Journal mutable journal_;
std::mutex mutable mutex_;
DatabaseCon* connection_;
std::unordered_set<PeerReservation, beast::uhash<>, KeyEqual> table_;
};
} // namespace ripple
#endif

View File

@@ -278,19 +278,26 @@ OverlayImpl::onHandoff (std::unique_ptr <beast::asio::ssl_bundle>&& ssl_bundle,
return handoff; return handoff;
} }
auto const result = m_peerFinder->activate (slot, *publicKey, {
static_cast<bool>(app_.cluster().member(*publicKey))); // The node gets a reserved slot if it is in our cluster
// or if it has a reservation.
bool const reserved {
static_cast<bool>(app_.cluster().member(*publicKey))
|| app_.peerReservations().contains(*publicKey)
};
auto const result = m_peerFinder->activate(slot, *publicKey, reserved);
if (result != PeerFinder::Result::success) if (result != PeerFinder::Result::success)
{ {
m_peerFinder->on_closed(slot); m_peerFinder->on_closed(slot);
JLOG(journal.debug()) << JLOG(journal.debug())
"Peer " << remote_endpoint << " redirected, slots full"; << "Peer " << remote_endpoint << " redirected, slots full";
handoff.moved = false; handoff.moved = false;
handoff.response = makeRedirectResponse(slot, request, handoff.response = makeRedirectResponse(
remote_endpoint.address()); slot, request, remote_endpoint.address());
handoff.keep_alive = beast::rfc2616::is_keep_alive(request); handoff.keep_alive = beast::rfc2616::is_keep_alive(request);
return handoff; return handoff;
} }
}
auto const peer = std::make_shared<PeerImp>(app_, id, auto const peer = std::make_shared<PeerImp>(app_, id,
remote_endpoint, slot, std::move(request), *hello, remote_endpoint, slot, std::move(request), *hello,

View File

@@ -254,6 +254,12 @@ PeerImp::crawl() const
return boost::beast::detail::iequals(iter->value(), "public"); return boost::beast::detail::iequals(iter->value(), "public");
} }
// Returns true when this peer's public key is a member of our configured
// cluster. Note this is distinct from a peer reservation: a reservation
// also earns a reserved slot, but does not make the peer a cluster member.
bool
PeerImp::cluster() const
{
    return static_cast<bool>(app_.cluster().member(publicKey_));
}
std::string std::string
PeerImp::getVersion() const PeerImp::getVersion() const
{ {
@@ -1614,7 +1620,7 @@ PeerImp::onMessage (std::shared_ptr <protocol::TMProposeSet> const& m)
{ {
protocol::TMProposeSet& set = *m; protocol::TMProposeSet& set = *m;
if (set.has_hops() && ! slot_->cluster()) if (set.has_hops() && ! cluster())
set.set_hops(set.hops() + 1); set.set_hops(set.hops() + 1);
auto const sig = makeSlice(set.signature()); auto const sig = makeSlice(set.signature());
@@ -1983,7 +1989,7 @@ PeerImp::onMessage (std::shared_ptr <protocol::TMValidation> const& m)
{ {
auto const closeTime = app_.timeKeeper().closeTime(); auto const closeTime = app_.timeKeeper().closeTime();
if (m->has_hops() && ! slot_->cluster()) if (m->has_hops() && ! cluster())
m->set_hops(m->hops() + 1); m->set_hops(m->hops() + 1);
if (m->validation ().size () < 50) if (m->validation ().size () < 50)

View File

@@ -281,10 +281,7 @@ public:
crawl() const; crawl() const;
bool bool
cluster() const override cluster() const override;
{
return slot_->cluster();
}
void void
check(); check();

View File

@@ -0,0 +1,168 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2019 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/overlay/PeerReservationTable.h>
#include <ripple/basics/Log.h>
#include <ripple/core/DatabaseCon.h>
#include <ripple/json/json_value.h>
#include <ripple/protocol/PublicKey.h>
#include <ripple/protocol/jss.h>
#include <boost/optional.hpp>
#include <algorithm>
#include <iterator>
#include <mutex>
#include <string>
#include <vector>
namespace ripple {
// Serialize this reservation as a JSON object: always the base58 node
// public key, plus the description when one was supplied.
auto
PeerReservation::toJson() const -> Json::Value
{
    Json::Value obj{Json::objectValue};
    obj[jss::node] = toBase58(TokenType::NodePublic, nodeId);
    if (!description.empty())
        obj[jss::description] = description;
    return obj;
}
// Return a snapshot of every reservation, sorted by node identity.
// The lock is held only while copying out of the set; sorting happens on
// the private copy afterward.
auto
PeerReservationTable::list() const -> std::vector<PeerReservation>
{
    std::vector<PeerReservation> reservations;
    {
        std::lock_guard<std::mutex> lock(mutex_);
        reservations.assign(table_.begin(), table_.end());
    }
    std::sort(reservations.begin(), reservations.end());
    return reservations;
}
// See `ripple/app/main/DBInit.cpp` for the `CREATE TABLE` statement.
// It is unfortunate that we do not get to define a function for it.

// We choose a `bool` return type to fit in with the error handling scheme
// of other functions called from `ApplicationImp::setup`, but we always
// return "no error" (`true`) because we can always return an empty table.
bool
PeerReservationTable::load(DatabaseCon& connection)
{
    std::lock_guard<std::mutex> lock(mutex_);

    // Remember the connection for later writes by
    // `insert_or_assign` and `erase`.
    connection_ = &connection;

    auto db = connection_->checkoutDb();

    boost::optional<std::string> valPubKey, valDesc;
    // We should really abstract the table and column names into constants,
    // but no one else does. Because it is too tedious? It would be easy if
    // we had a jOOQ for C++.
    soci::statement st =
        (db->prepare << "SELECT PublicKey, Description FROM PeerReservations;",
         soci::into(valPubKey),
         soci::into(valDesc));
    st.execute();
    while (st.fetch())
    {
        if (!valPubKey || !valDesc)
        {
            // This represents a `NULL` in a `NOT NULL` column. It should be
            // unreachable.
            continue;
        }

        auto const optNodeId =
            parseBase58<PublicKey>(TokenType::NodePublic, *valPubKey);
        if (!optNodeId)
        {
            // Dereference the optional when logging: streaming the
            // `boost::optional` wrapper itself does not compile without
            // <boost/optional/optional_io.hpp>, and the intent is to print
            // the offending key text anyway.
            JLOG(journal_.warn()) << "load: not a public key: " << *valPubKey;
            continue;
        }
        table_.insert(PeerReservation{*optNodeId, *valDesc});
    }

    return true;
}
// Insert a reservation for `reservation.nodeId`, replacing any existing
// reservation for that node.
//
// @return the replaced reservation if it existed
// @throw soci::soci_error
auto
PeerReservationTable::insert_or_assign(
    PeerReservation const& reservation)
    -> boost::optional<PeerReservation>
{
    boost::optional<PeerReservation> previous;

    std::lock_guard<std::mutex> lock(mutex_);

    // Write to the database first: if the statement throws, the in-memory
    // table is left untouched and stays consistent with persistent state.
    {
        auto db = connection_->checkoutDb();
        *db << "INSERT INTO PeerReservations (PublicKey, Description) "
               "VALUES (:nodeId, :desc) "
               "ON CONFLICT (PublicKey) DO UPDATE SET "
               "Description=excluded.Description",
            soci::use(toBase58(TokenType::NodePublic, reservation.nodeId)),
            soci::use(reservation.description);
    }

    auto hint = table_.find(reservation);
    if (hint != table_.end())
    {
        // The node already has a reservation. Remove it.
        // `std::unordered_set` does not have an `insert_or_assign` method,
        // and sadly makes it impossible for us to implement one efficiently:
        // https://stackoverflow.com/q/49651835/618906
        // Regardless, we don't expect this function to be called often, or
        // for the table to be very large, so this less-than-ideal
        // remove-then-insert is acceptable in order to present a better API.
        previous = *hint;
        // We should pick an adjacent location for the insertion hint.
        // Decrementing may be illegal if the found reservation is at the
        // beginning. Incrementing is always legal; at worst we'll point to
        // the end.
        auto const stale = hint;
        ++hint;
        table_.erase(stale);
    }
    table_.insert(hint, reservation);

    return previous;
}
// Remove the reservation for `nodeId`, if any, from both the in-memory
// table and the database.
//
// @return the erased reservation if it existed
auto
PeerReservationTable::erase(PublicKey const& nodeId)
    -> boost::optional<PeerReservation>
{
    boost::optional<PeerReservation> erased;

    std::lock_guard<std::mutex> lock(mutex_);
    auto const pos = table_.find({nodeId});
    if (pos != table_.end())
    {
        erased = *pos;
        table_.erase(pos);
        auto db = connection_->checkoutDb();
        *db << "DELETE FROM PeerReservations WHERE PublicKey = :nodeId",
            soci::use(toBase58(TokenType::NodePublic, nodeId));
    }

    return erased;
}
} // namespace ripple

View File

@@ -222,7 +222,7 @@ public:
virtual virtual
Result Result
activate (Slot::ptr const& slot, activate (Slot::ptr const& slot,
PublicKey const& key, bool cluster) = 0; PublicKey const& key, bool reserved) = 0;
/** Returns a set of endpoints suitable for redirection. */ /** Returns a set of endpoints suitable for redirection. */
virtual virtual

View File

@@ -54,10 +54,11 @@ public:
*/ */
virtual bool fixed () const = 0; virtual bool fixed () const = 0;
/** Returns `true` if this is a cluster connection. /** Returns `true` if this is a reserved connection.
It might be a cluster peer, or a peer with a reservation.
This is only known after the handshake completes. This is only known after the handshake completes.
*/ */
virtual bool cluster () const = 0; virtual bool reserved () const = 0;
/** Returns the state of the connection. */ /** Returns the state of the connection. */
virtual State state () const = 0; virtual State state () const = 0;

View File

@@ -41,7 +41,7 @@ public:
, m_out_active (0) , m_out_active (0)
, m_fixed (0) , m_fixed (0)
, m_fixed_active (0) , m_fixed_active (0)
, m_cluster (0) , m_reserved (0)
, m_acceptCount (0) , m_acceptCount (0)
, m_closingCount (0) , m_closingCount (0)
@@ -71,7 +71,7 @@ public:
// Must be handshaked and in the right state // Must be handshaked and in the right state
assert (s.state() == Slot::connected || s.state() == Slot::accept); assert (s.state() == Slot::connected || s.state() == Slot::accept);
if (s.fixed () || s.cluster ()) if (s.fixed () || s.reserved ())
return true; return true;
if (s.inbound ()) if (s.inbound ())
@@ -231,7 +231,7 @@ public:
map ["in"] << m_in_active << "/" << m_in_max; map ["in"] << m_in_active << "/" << m_in_max;
map ["out"] << m_out_active << "/" << m_out_max; map ["out"] << m_out_active << "/" << m_out_max;
map ["fixed"] = m_fixed_active; map ["fixed"] = m_fixed_active;
map ["cluster"] = m_cluster; map ["reserved"] = m_reserved;
map ["total"] = m_active; map ["total"] = m_active;
} }
@@ -256,8 +256,8 @@ private:
if (s.fixed ()) if (s.fixed ())
m_fixed += n; m_fixed += n;
if (s.cluster ()) if (s.reserved ())
m_cluster += n; m_reserved += n;
switch (s.state ()) switch (s.state ())
{ {
@@ -275,7 +275,7 @@ private:
case Slot::active: case Slot::active:
if (s.fixed ()) if (s.fixed ())
m_fixed_active += n; m_fixed_active += n;
if (! s.fixed () && ! s.cluster ()) if (! s.fixed () && ! s.reserved ())
{ {
if (s.inbound ()) if (s.inbound ())
m_in_active += n; m_in_active += n;
@@ -299,7 +299,7 @@ private:
/** Outbound connection attempts. */ /** Outbound connection attempts. */
int m_attempts; int m_attempts;
/** Active connections, including fixed and cluster. */ /** Active connections, including fixed and reserved. */
std::size_t m_active; std::size_t m_active;
/** Total number of inbound slots. */ /** Total number of inbound slots. */
@@ -320,9 +320,8 @@ private:
/** Active fixed connections. */ /** Active fixed connections. */
std::size_t m_fixed_active; std::size_t m_fixed_active;
/** Cluster connections. */ /** Reserved connections. */
std::size_t m_cluster; std::size_t m_reserved;

View File

@@ -372,12 +372,11 @@ public:
} }
Result Result
activate (SlotImp::ptr const& slot, activate (SlotImp::ptr const& slot, PublicKey const& key, bool reserved)
PublicKey const& key, bool cluster)
{ {
JLOG(m_journal.debug()) << beast::leftw (18) << JLOG(m_journal.debug()) << beast::leftw (18) <<
"Logic handshake " << slot->remote_endpoint () << "Logic handshake " << slot->remote_endpoint () <<
" with " << (cluster ? "clustered " : "") << "key " << key; " with " << (reserved ? "reserved " : "") << "key " << key;
std::lock_guard<std::recursive_mutex> _(lock_); std::lock_guard<std::recursive_mutex> _(lock_);
@@ -392,9 +391,10 @@ public:
if (keys_.find (key) != keys_.end()) if (keys_.find (key) != keys_.end())
return Result::duplicate; return Result::duplicate;
// If the peer belongs to a cluster, update the slot to reflect that. // If the peer belongs to a cluster or is reserved,
// update the slot to reflect that.
counts_.remove (*slot); counts_.remove (*slot);
slot->cluster (cluster); slot->reserved (reserved);
counts_.add (*slot); counts_.add (*slot);
// See if we have an open space for this slot // See if we have an open space for this slot
@@ -1088,8 +1088,8 @@ public:
item ["inbound"] = "yes"; item ["inbound"] = "yes";
if (slot.fixed()) if (slot.fixed())
item ["fixed"] = "yes"; item ["fixed"] = "yes";
if (slot.cluster()) if (slot.reserved())
item ["cluster"] = "yes"; item ["reserved"] = "yes";
item ["state"] = stateString (slot.state()); item ["state"] = stateString (slot.state());
} }

View File

@@ -173,11 +173,10 @@ public:
} }
Result Result
activate (Slot::ptr const& slot, activate (Slot::ptr const& slot, PublicKey const& key, bool reserved) override
PublicKey const& key, bool cluster) override
{ {
SlotImp::ptr impl (std::dynamic_pointer_cast <SlotImp> (slot)); SlotImp::ptr impl (std::dynamic_pointer_cast <SlotImp> (slot));
return m_logic.activate (impl, key, cluster); return m_logic.activate (impl, key, reserved);
} }
std::vector <Endpoint> std::vector <Endpoint>

View File

@@ -30,7 +30,7 @@ SlotImp::SlotImp (beast::IP::Endpoint const& local_endpoint,
: recent (clock) : recent (clock)
, m_inbound (true) , m_inbound (true)
, m_fixed (fixed) , m_fixed (fixed)
, m_cluster (false) , m_reserved (false)
, m_state (accept) , m_state (accept)
, m_remote_endpoint (remote_endpoint) , m_remote_endpoint (remote_endpoint)
, m_local_endpoint (local_endpoint) , m_local_endpoint (local_endpoint)
@@ -46,7 +46,7 @@ SlotImp::SlotImp (beast::IP::Endpoint const& remote_endpoint,
: recent (clock) : recent (clock)
, m_inbound (false) , m_inbound (false)
, m_fixed (fixed) , m_fixed (fixed)
, m_cluster (false) , m_reserved (false)
, m_state (connect) , m_state (connect)
, m_remote_endpoint (remote_endpoint) , m_remote_endpoint (remote_endpoint)
, m_listening_port (unknownPort) , m_listening_port (unknownPort)

View File

@@ -57,9 +57,9 @@ public:
return m_fixed; return m_fixed;
} }
bool cluster () const override bool reserved () const override
{ {
return m_cluster; return m_reserved;
} }
State state () const override State state () const override
@@ -110,9 +110,9 @@ public:
m_public_key = key; m_public_key = key;
} }
void cluster (bool cluster_) void reserved (bool reserved_)
{ {
m_cluster = cluster_; m_reserved = reserved_;
} }
//-------------------------------------------------------------------------- //--------------------------------------------------------------------------
@@ -155,7 +155,7 @@ public:
private: private:
bool const m_inbound; bool const m_inbound;
bool const m_fixed; bool const m_fixed;
bool m_cluster; bool m_reserved;
State m_state; State m_state;
beast::IP::Endpoint m_remote_endpoint; beast::IP::Endpoint m_remote_endpoint;
boost::optional <beast::IP::Endpoint> m_local_endpoint; boost::optional <beast::IP::Endpoint> m_local_endpoint;

View File

@@ -29,8 +29,16 @@ namespace jss {
#define JSS(x) constexpr ::Json::StaticString x ( #x ) #define JSS(x) constexpr ::Json::StaticString x ( #x )
/* The "StaticString" field names are used instead of string literals to /* These "StaticString" field names are used instead of string literals to
optimize the performance of accessing members of Json::Value objects. optimize the performance of accessing properties of Json::Value objects.
Most strings have a trailing comment. Here is the legend:
in: Read by the given RPC handler from its `Json::Value` parameter.
out: Assigned by the given RPC handler in the `Json::Value` it returns.
field: A field of at least one type of transaction.
RPC: Common properties of RPC requests and responses.
error: Common properties of RPC error responses.
*/ */
JSS ( AL_hit_rate ); // out: GetCounts JSS ( AL_hit_rate ); // out: GetCounts
@@ -177,6 +185,7 @@ JSS ( deposit_authorized ); // out: deposit_authorized
JSS ( deposit_preauth ); // in: AccountObjects, LedgerData JSS ( deposit_preauth ); // in: AccountObjects, LedgerData
JSS ( deprecated ); // out JSS ( deprecated ); // out
JSS ( descending ); // in: AccountTx* JSS ( descending ); // in: AccountTx*
JSS ( description ); // in/out: Reservations
JSS ( destination_account ); // in: PathRequest, RipplePathFind, account_lines JSS ( destination_account ); // in: PathRequest, RipplePathFind, account_lines
// out: AccountChannels // out: AccountChannels
JSS ( destination_amount ); // in: PathRequest, RipplePathFind JSS ( destination_amount ); // in: PathRequest, RipplePathFind
@@ -225,7 +234,7 @@ JSS ( freeze_peer ); // out: AccountLines
JSS ( frozen_balances ); // out: GatewayBalances JSS ( frozen_balances ); // out: GatewayBalances
JSS ( full ); // in: LedgerClearer, handlers/Ledger JSS ( full ); // in: LedgerClearer, handlers/Ledger
JSS ( full_reply ); // out: PathFind JSS ( full_reply ); // out: PathFind
JSS ( fullbelow_size ); // in: GetCounts JSS ( fullbelow_size ); // out: GetCounts
JSS ( good ); // out: RPCVersion JSS ( good ); // out: RPCVersion
JSS ( hash ); // out: NetworkOPs, InboundLedger, JSS ( hash ); // out: NetworkOPs, InboundLedger,
// LedgerToJson, STTx; field // LedgerToJson, STTx; field
@@ -378,6 +387,7 @@ JSS ( peer_disconnects ); // Severed peer connection counter.
JSS ( peer_disconnects_resources ); // Severed peer connections because of JSS ( peer_disconnects_resources ); // Severed peer connections because of
// excess resource consumption. // excess resource consumption.
JSS ( port ); // in: Connect JSS ( port ); // in: Connect
JSS ( previous ); // out: Reservations
JSS ( previous_ledger ); // out: LedgerPropose JSS ( previous_ledger ); // out: LedgerPropose
JSS ( proof ); // in: BookOffers JSS ( proof ); // in: BookOffers
JSS ( propose_seq ); // out: LedgerPropose JSS ( propose_seq ); // out: LedgerPropose
@@ -406,6 +416,7 @@ JSS ( refresh_interval_min ); // out: ValidatorSites
JSS ( regular_seed ); // in/out: LedgerEntry JSS ( regular_seed ); // in/out: LedgerEntry
JSS ( remote ); // out: Logic.h JSS ( remote ); // out: Logic.h
JSS ( request ); // RPC JSS ( request ); // RPC
JSS ( reservations ); // out: Reservations
JSS ( reserve_base ); // out: NetworkOPs JSS ( reserve_base ); // out: NetworkOPs
JSS ( reserve_base_xrp ); // out: NetworkOPs JSS ( reserve_base_xrp ); // out: NetworkOPs
JSS ( reserve_inc ); // out: NetworkOPs JSS ( reserve_inc ); // out: NetworkOPs

View File

@@ -64,6 +64,9 @@ Json::Value doPeers (RPC::Context&);
Json::Value doPing (RPC::Context&); Json::Value doPing (RPC::Context&);
Json::Value doPrint (RPC::Context&); Json::Value doPrint (RPC::Context&);
Json::Value doRandom (RPC::Context&); Json::Value doRandom (RPC::Context&);
Json::Value doPeerReservationsAdd (RPC::Context&);
Json::Value doPeerReservationsDel (RPC::Context&);
Json::Value doPeerReservationsList (RPC::Context&);
Json::Value doRipplePathFind (RPC::Context&); Json::Value doRipplePathFind (RPC::Context&);
Json::Value doServerInfo (RPC::Context&); // for humans Json::Value doServerInfo (RPC::Context&); // for humans
Json::Value doServerState (RPC::Context&); // for machines Json::Value doServerState (RPC::Context&); // for machines

View File

@@ -0,0 +1,130 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2019 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/json/json_value.h>
#include <ripple/net/RPCErr.h>
#include <ripple/protocol/ErrorCodes.h>
#include <ripple/protocol/PublicKey.h>
#include <ripple/protocol/jss.h>
#include <ripple/rpc/Context.h>
#include <ripple/rpc/handlers/Handlers.h>
#include <boost/optional.hpp>
#include <string>
#include <utility>
namespace ripple {
// RPC handler for `peer_reservations_add`.
// Required parameter: `public_key` (base58 node public key).
// Optional parameter: `description` (free-form string).
// On success, returns an object that carries the replaced reservation
// under `previous`, if one existed.
Json::Value
doPeerReservationsAdd(RPC::Context& context)
{
    auto const& params = context.params;

    if (!params.isMember(jss::public_key))
        return RPC::missing_field_error(jss::public_key);

    // NOTE: Returning JSON from every step makes it hard to factor out the
    // common "get field F as type T or diagnose" pattern without copying
    // whole objects around; an error monad (an optional with a non-unit
    // failure type) would express this more cleanly than exceptions.
    if (!params[jss::public_key].isString())
        return RPC::expected_field_error(jss::public_key, "a string");

    // The description is optional and defaults to the empty string.
    std::string description;
    if (params.isMember(jss::description))
    {
        if (!params[jss::description].isString())
            return RPC::expected_field_error(jss::description, "a string");
        description = params[jss::description].asString();
    }

    // channel_verify accepts a key in both base58 and hex; here, by design
    // (@nikb's preference), only base58 is accepted.
    auto const parsed = parseBase58<PublicKey>(
        TokenType::NodePublic, params[jss::public_key].asString());
    if (!parsed)
        return rpcError(rpcPUBLIC_MALFORMED);

    auto const previous = context.app.peerReservations().insert_or_assign(
        PeerReservation{*parsed, description});

    Json::Value result{Json::objectValue};
    if (previous)
        result[jss::previous] = previous->toJson();
    return result;
}
Json::Value
doPeerReservationsDel(RPC::Context& context)
{
    auto const& params = context.params;

    // Validate the public_key parameter the same way peer_reservations_add
    // does: it must be present, a string, and a base58 node public key.
    if (!params.isMember(jss::public_key))
        return RPC::missing_field_error(jss::public_key);
    if (!params[jss::public_key].isString())
        return RPC::expected_field_error(jss::public_key, "a string");

    auto const publicKey = parseBase58<PublicKey>(
        TokenType::NodePublic, params[jss::public_key].asString());
    if (!publicKey)
        return rpcError(rpcPUBLIC_MALFORMED);

    // Remove the reservation, if any. When one existed, report it back
    // to the caller under "previous".
    Json::Value result{Json::objectValue};
    auto const removed = context.app.peerReservations().erase(*publicKey);
    if (removed)
        result[jss::previous] = removed->toJson();
    return result;
}
Json::Value
doPeerReservationsList(RPC::Context& context)
{
    // Return every current peer reservation as a JSON array under
    // the "reservations" key.
    Json::Value result{Json::objectValue};
    Json::Value& jsonReservations =
        result[jss::reservations] = Json::arrayValue;
    for (auto const& reservation : context.app.peerReservations().list())
        jsonReservations.append(reservation.toJson());
    return result;
}
} // namespace ripple

View File

@@ -97,6 +97,9 @@ Handler const handlerArray[] {
{ "print", byRef (&doPrint), Role::ADMIN, NO_CONDITION }, { "print", byRef (&doPrint), Role::ADMIN, NO_CONDITION },
// { "profile", byRef (&doProfile), Role::USER, NEEDS_CURRENT_LEDGER }, // { "profile", byRef (&doProfile), Role::USER, NEEDS_CURRENT_LEDGER },
{ "random", byRef (&doRandom), Role::USER, NO_CONDITION }, { "random", byRef (&doRandom), Role::USER, NO_CONDITION },
{ "peer_reservations_add", byRef (&doPeerReservationsAdd), Role::ADMIN, NO_CONDITION },
{ "peer_reservations_del", byRef (&doPeerReservationsDel), Role::ADMIN, NO_CONDITION },
{ "peer_reservations_list", byRef (&doPeerReservationsList), Role::ADMIN, NO_CONDITION },
{ "ripple_path_find", byRef (&doRipplePathFind), Role::USER, NO_CONDITION }, { "ripple_path_find", byRef (&doRipplePathFind), Role::USER, NO_CONDITION },
{ "sign", byRef (&doSign), Role::USER, NO_CONDITION }, { "sign", byRef (&doSign), Role::USER, NO_CONDITION },
{ "sign_for", byRef (&doSignFor), Role::USER, NO_CONDITION }, { "sign_for", byRef (&doSignFor), Role::USER, NO_CONDITION },

View File

@@ -19,6 +19,7 @@
#include <ripple/overlay/impl/PeerImp.cpp> #include <ripple/overlay/impl/PeerImp.cpp>
#include <ripple/overlay/impl/PeerReservationTable.cpp>
#include <ripple/overlay/impl/PeerSet.cpp> #include <ripple/overlay/impl/PeerSet.cpp>
#include <ripple/overlay/impl/TMHello.cpp> #include <ripple/overlay/impl/TMHello.cpp>
#include <ripple/overlay/impl/TrafficCount.cpp> #include <ripple/overlay/impl/TrafficCount.cpp>

View File

@@ -31,6 +31,7 @@
#include <ripple/rpc/handlers/Ping.cpp> #include <ripple/rpc/handlers/Ping.cpp>
#include <ripple/rpc/handlers/Print.cpp> #include <ripple/rpc/handlers/Print.cpp>
#include <ripple/rpc/handlers/Random.cpp> #include <ripple/rpc/handlers/Random.cpp>
#include <ripple/rpc/handlers/Reservations.cpp>
#include <ripple/rpc/handlers/RipplePathFind.cpp> #include <ripple/rpc/handlers/RipplePathFind.cpp>
#include <ripple/rpc/handlers/ServerInfo.cpp> #include <ripple/rpc/handlers/ServerInfo.cpp>
#include <ripple/rpc/handlers/ServerState.cpp> #include <ripple/rpc/handlers/ServerState.cpp>