From 4b9d3ca7de7c5758fbd6ae774c74196fa6b4268c Mon Sep 17 00:00:00 2001 From: Edward Hennis Date: Wed, 9 Sep 2020 18:51:08 -0400 Subject: [PATCH] Support UNLs with future effective dates: * Creates a version 2 of the UNL file format allowing publishers to pre-publish the next UNL while the current one is still valid. * Version 1 of the UNL file format is still valid and backward compatible. * Also causes rippled to lock down if it has no valid UNLs, similar to being amendment blocked, except reversible. * Resolves #3548 * Resolves #3470 --- src/ripple/app/consensus/RCLConsensus.cpp | 2 +- src/ripple/app/ledger/impl/LedgerMaster.cpp | 2 +- src/ripple/app/main/Application.cpp | 18 +- src/ripple/app/misc/NetworkOPs.cpp | 334 +++-- src/ripple/app/misc/NetworkOPs.h | 8 + src/ripple/app/misc/ValidatorList.h | 456 +++++- src/ripple/app/misc/ValidatorSite.h | 2 +- src/ripple/app/misc/impl/ValidatorList.cpp | 1370 +++++++++++++++--- src/ripple/app/misc/impl/ValidatorSite.cpp | 138 +- src/ripple/overlay/Message.h | 9 + src/ripple/overlay/Peer.h | 1 + src/ripple/overlay/impl/Message.cpp | 27 +- src/ripple/overlay/impl/OverlayImpl.cpp | 52 +- src/ripple/overlay/impl/PeerImp.cpp | 395 ++++-- src/ripple/overlay/impl/PeerImp.h | 11 + src/ripple/overlay/impl/ProtocolMessage.h | 33 +- src/ripple/overlay/impl/ProtocolVersion.cpp | 3 +- src/ripple/overlay/impl/TrafficCount.cpp | 3 +- src/ripple/proto/ripple.proto | 19 +- src/ripple/protocol/ErrorCodes.h | 4 +- src/ripple/protocol/impl/ErrorCodes.cpp | 1 + src/ripple/protocol/jss.h | 7 + src/ripple/rpc/impl/Handler.h | 21 +- src/test/app/ValidatorList_test.cpp | 1410 ++++++++++++++++--- src/test/app/ValidatorSite_test.cpp | 170 ++- src/test/consensus/NegativeUNL_test.cpp | 7 +- src/test/jtx/TrustedPublisherServer.h | 140 +- src/test/net/DatabaseDownloader_test.cpp | 2 + src/test/overlay/compression_test.cpp | 42 +- src/test/rpc/ShardArchiveHandler_test.cpp | 2 + src/test/rpc/ValidatorRPC_test.cpp | 223 ++- 31 files changed, 3980 
insertions(+), 932 deletions(-) diff --git a/src/ripple/app/consensus/RCLConsensus.cpp b/src/ripple/app/consensus/RCLConsensus.cpp index d0bb15fca..6c5206716 100644 --- a/src/ripple/app/consensus/RCLConsensus.cpp +++ b/src/ripple/app/consensus/RCLConsensus.cpp @@ -949,7 +949,7 @@ RCLConsensus::Adaptor::preStartRound( // and are not amendment blocked. validating_ = valPublic_.size() != 0 && prevLgr.seq() >= app_.getMaxDisallowedLedger() && - !app_.getOPs().isAmendmentBlocked(); + !app_.getOPs().isBlocked(); // If we are not running in standalone mode and there's a configured UNL, // check to make sure that it's not expired. diff --git a/src/ripple/app/ledger/impl/LedgerMaster.cpp b/src/ripple/app/ledger/impl/LedgerMaster.cpp index 3a21c050d..f7195c813 100644 --- a/src/ripple/app/ledger/impl/LedgerMaster.cpp +++ b/src/ripple/app/ledger/impl/LedgerMaster.cpp @@ -352,7 +352,7 @@ LedgerMaster::setValidLedger(std::shared_ptr const& l) app_.getSHAMapStore().onLedgerClosed(getValidatedLedger()); mLedgerHistory.validatedLedger(l, consensusHash); app_.getAmendmentTable().doValidatedLedger(l); - if (!app_.getOPs().isAmendmentBlocked()) + if (!app_.getOPs().isBlocked()) { if (app_.getAmendmentTable().hasUnsupportedEnabled()) { diff --git a/src/ripple/app/main/Application.cpp b/src/ripple/app/main/Application.cpp index d45608156..fb090088b 100644 --- a/src/ripple/app/main/Application.cpp +++ b/src/ripple/app/main/Application.cpp @@ -2117,6 +2117,18 @@ ApplicationImp::serverOkay(std::string& reason) return false; } + if (getOPs().isAmendmentBlocked()) + { + reason = "Server version too old"; + return false; + } + + if (getOPs().isUNLBlocked()) + { + reason = "No valid validator list available"; + return false; + } + if (getOPs().getOperatingMode() < OperatingMode::SYNCING) { reason = "Not synchronized with network"; @@ -2132,12 +2144,6 @@ ApplicationImp::serverOkay(std::string& reason) return false; } - if (getOPs().isAmendmentBlocked()) - { - reason = "Server version too old"; 
- return false; - } - return true; } diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index 3d4ff057e..67e9f2924 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -261,19 +261,13 @@ public: public: OperatingMode - getOperatingMode() const override - { - return mMode; - } + getOperatingMode() const override; std::string strOperatingMode(OperatingMode const mode, bool const admin) const override; std::string - strOperatingMode(bool const admin = false) const override - { - return strOperatingMode(mMode, admin); - } + strOperatingMode(bool const admin = false) const override; // // Transaction operations. @@ -391,10 +385,7 @@ public: void endConsensus() override; void - setStandAlone() override - { - setMode(OperatingMode::FULL); - } + setStandAlone() override; /** Called to initially start our timers. Not called for stand-alone mode. @@ -403,51 +394,35 @@ public: setStateTimer() override; void - setNeedNetworkLedger() override - { - needNetworkLedger_ = true; - } + setNeedNetworkLedger() override; void - clearNeedNetworkLedger() override - { - needNetworkLedger_ = false; - } + clearNeedNetworkLedger() override; bool - isNeedNetworkLedger() override - { - return needNetworkLedger_; - } + isNeedNetworkLedger() override; bool - isFull() override - { - return !needNetworkLedger_ && (mMode == OperatingMode::FULL); - } + isFull() override; void setMode(OperatingMode om) override; bool - isAmendmentBlocked() override - { - return amendmentBlocked_; - } + isBlocked() override; + bool + isAmendmentBlocked() override; void setAmendmentBlocked() override; bool - isAmendmentWarned() override - { - return !amendmentBlocked_ && amendmentWarned_; - } + isAmendmentWarned() override; void - setAmendmentWarned() override - { - amendmentWarned_ = true; - } + setAmendmentWarned() override; void - clearAmendmentWarned() override - { - amendmentWarned_ = false; - } + clearAmendmentWarned() override; + bool + 
isUNLBlocked() override; + void + setUNLBlocked() override; + void + clearUNLBlocked() override; void consensusViewChange() override; @@ -470,15 +445,9 @@ public: reportConsensusStateChange(ConsensusPhase phase); void - updateLocalTx(ReadView const& view) override - { - m_localTX->sweep(view); - } + updateLocalTx(ReadView const& view) override; std::size_t - getLocalTxCount() override - { - return m_localTX->size(); - } + getLocalTxCount() override; // Helper function to generate SQL query to get transactions. std::string @@ -638,34 +607,7 @@ public: // Stoppable. void - onStop() override - { - mAcquiringLedger.reset(); - { - boost::system::error_code ec; - heartbeatTimer_.cancel(ec); - if (ec) - { - JLOG(m_journal.error()) - << "NetworkOPs: heartbeatTimer cancel error: " - << ec.message(); - } - - ec.clear(); - clusterTimer_.cancel(ec); - if (ec) - { - JLOG(m_journal.error()) - << "NetworkOPs: clusterTimer cancel error: " - << ec.message(); - } - } - // Make sure that any waitHandlers pending in our timers are done - // before we declare ourselves stopped. 
- using namespace std::chrono_literals; - waitHandlerCounter_.join("NetworkOPs", 1s, m_journal); - stopped(); - } + onStop() override; private: void @@ -720,6 +662,7 @@ private: std::atomic needNetworkLedger_{false}; std::atomic amendmentBlocked_{false}; std::atomic amendmentWarned_{false}; + std::atomic unlBlocked_{false}; ClosureCounter waitHandlerCounter_; boost::asio::steady_timer heartbeatTimer_; @@ -827,47 +770,7 @@ private: private: void - collect_metrics() - { - auto [counters, mode, start] = accounting_.getCounterData(); - auto const current = - std::chrono::duration_cast( - std::chrono::system_clock::now() - start); - counters[static_cast(mode)].dur += current; - - std::lock_guard lock(m_statsMutex); - m_stats.disconnected_duration.set( - counters[static_cast(OperatingMode::DISCONNECTED)] - .dur.count()); - m_stats.connected_duration.set( - counters[static_cast(OperatingMode::CONNECTED)] - .dur.count()); - m_stats.syncing_duration.set( - counters[static_cast(OperatingMode::SYNCING)] - .dur.count()); - m_stats.tracking_duration.set( - counters[static_cast(OperatingMode::TRACKING)] - .dur.count()); - m_stats.full_duration.set( - counters[static_cast(OperatingMode::FULL)] - .dur.count()); - - m_stats.disconnected_transitions.set( - counters[static_cast(OperatingMode::DISCONNECTED)] - .transitions); - m_stats.connected_transitions.set( - counters[static_cast(OperatingMode::CONNECTED)] - .transitions); - m_stats.syncing_transitions.set( - counters[static_cast(OperatingMode::SYNCING)] - .transitions); - m_stats.tracking_transitions.set( - counters[static_cast(OperatingMode::TRACKING)] - .transitions); - m_stats.full_transitions.set( - counters[static_cast(OperatingMode::FULL)] - .transitions); - } + collect_metrics(); }; //------------------------------------------------------------------------------ @@ -886,6 +789,48 @@ std::array const Json::StaticString(stateNames[4])}}; //------------------------------------------------------------------------------ +inline 
OperatingMode +NetworkOPsImp::getOperatingMode() const +{ + return mMode; +} + +inline std::string +NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const +{ + return strOperatingMode(mMode, admin); +} + +inline void +NetworkOPsImp::setStandAlone() +{ + setMode(OperatingMode::FULL); +} + +inline void +NetworkOPsImp::setNeedNetworkLedger() +{ + needNetworkLedger_ = true; +} + +inline void +NetworkOPsImp::clearNeedNetworkLedger() +{ + needNetworkLedger_ = false; +} + +inline bool +NetworkOPsImp::isNeedNetworkLedger() +{ + return needNetworkLedger_; +} + +inline bool +NetworkOPsImp::isFull() +{ + return !needNetworkLedger_ && (mMode == OperatingMode::FULL); +} + std::string NetworkOPsImp::getHostId(bool forAdmin) { @@ -1554,11 +1499,60 @@ NetworkOPsImp::getOwnerInfo( // Other // +inline bool +NetworkOPsImp::isBlocked() +{ + return isAmendmentBlocked() || isUNLBlocked(); +} + +inline bool +NetworkOPsImp::isAmendmentBlocked() +{ + return amendmentBlocked_; +} + void NetworkOPsImp::setAmendmentBlocked() { amendmentBlocked_ = true; - setMode(OperatingMode::TRACKING); + setMode(OperatingMode::CONNECTED); +} + +inline bool +NetworkOPsImp::isAmendmentWarned() +{ + return !amendmentBlocked_ && amendmentWarned_; +} + +inline void +NetworkOPsImp::setAmendmentWarned() +{ + amendmentWarned_ = true; +} + +inline void +NetworkOPsImp::clearAmendmentWarned() +{ + amendmentWarned_ = false; +} + +inline bool +NetworkOPsImp::isUNLBlocked() +{ + return unlBlocked_; +} + +void +NetworkOPsImp::setUNLBlocked() +{ + unlBlocked_ = true; + setMode(OperatingMode::CONNECTED); +} + +inline void +NetworkOPsImp::clearUNLBlocked() +{ + unlBlocked_ = false; } bool @@ -1752,7 +1746,11 @@ NetworkOPsImp::beginConsensus(uint256 const& networkClosed) if (prevLedger->rules().enabled(featureNegativeUNL)) app_.validators().setNegativeUNL(prevLedger->negativeUNL()); TrustChanges const changes = app_.validators().updateTrusted( - app_.getValidations().getCurrentNodeIDs()); + 
app_.getValidations().getCurrentNodeIDs(), + closingInfo.parentCloseTime, + *this, + app_.overlay(), + app_.getHashRouter()); if (!changes.added.empty() || !changes.removed.empty()) app_.getValidations().trustChanged(changes.added, changes.removed); @@ -2154,8 +2152,8 @@ NetworkOPsImp::setMode(OperatingMode om) om = OperatingMode::CONNECTED; } - if ((om > OperatingMode::TRACKING) && amendmentBlocked_) - om = OperatingMode::TRACKING; + if ((om > OperatingMode::CONNECTED) && isBlocked()) + om = OperatingMode::CONNECTED; if (mMode == om) return; @@ -2505,6 +2503,15 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) "This server is amendment blocked, and must be updated to be " "able to stay in sync with the network."; } + if (isUNLBlocked()) + { + Json::Value& w = warnings.append(Json::objectValue); + w[jss::id] = warnRPC_EXPIRED_VALIDATOR_LIST; + w[jss::message] = + "This server has an expired validator list. validators.txt " + "may be incorrectly configured or some [validator_list_sites] " + "may be unreachable."; + } if (admin && isAmendmentWarned()) { Json::Value& w = warnings.append(Json::objectValue); @@ -2948,6 +2955,17 @@ NetworkOPsImp::reportConsensusStateChange(ConsensusPhase phase) [this, phase](Job&) { pubConsensus(phase); }); } +inline void +NetworkOPsImp::updateLocalTx(ReadView const& view) +{ + m_localTX->sweep(view); +} +inline std::size_t +NetworkOPsImp::getLocalTxCount() +{ + return m_localTX->size(); +} + // This routine should only be used to publish accepted or validated // transactions. 
Json::Value @@ -3498,6 +3516,34 @@ NetworkOPsImp::tryRemoveRpcSub(std::string const& strUrl) return true; } +void +NetworkOPsImp::onStop() +{ + mAcquiringLedger.reset(); + { + boost::system::error_code ec; + heartbeatTimer_.cancel(ec); + if (ec) + { + JLOG(m_journal.error()) + << "NetworkOPs: heartbeatTimer cancel error: " << ec.message(); + } + + ec.clear(); + clusterTimer_.cancel(ec); + if (ec) + { + JLOG(m_journal.error()) + << "NetworkOPs: clusterTimer cancel error: " << ec.message(); + } + } + // Make sure that any waitHandlers pending in our timers are done + // before we declare ourselves stopped. + using namespace std::chrono_literals; + waitHandlerCounter_.join("NetworkOPs", 1s, m_journal); + stopped(); +} + #ifndef USE_NEW_BOOK_PAGE // NIKB FIXME this should be looked at. There's no reason why this shouldn't @@ -3855,6 +3901,44 @@ NetworkOPsImp::getBookPage( #endif +inline void +NetworkOPsImp::collect_metrics() +{ + auto [counters, mode, start] = accounting_.getCounterData(); + auto const current = std::chrono::duration_cast( + std::chrono::system_clock::now() - start); + counters[static_cast(mode)].dur += current; + + std::lock_guard lock(m_statsMutex); + m_stats.disconnected_duration.set( + counters[static_cast(OperatingMode::DISCONNECTED)] + .dur.count()); + m_stats.connected_duration.set( + counters[static_cast(OperatingMode::CONNECTED)] + .dur.count()); + m_stats.syncing_duration.set( + counters[static_cast(OperatingMode::SYNCING)].dur.count()); + m_stats.tracking_duration.set( + counters[static_cast(OperatingMode::TRACKING)] + .dur.count()); + m_stats.full_duration.set( + counters[static_cast(OperatingMode::FULL)].dur.count()); + + m_stats.disconnected_transitions.set( + counters[static_cast(OperatingMode::DISCONNECTED)] + .transitions); + m_stats.connected_transitions.set( + counters[static_cast(OperatingMode::CONNECTED)] + .transitions); + m_stats.syncing_transitions.set( + counters[static_cast(OperatingMode::SYNCING)].transitions); + 
m_stats.tracking_transitions.set( + counters[static_cast(OperatingMode::TRACKING)] + .transitions); + m_stats.full_transitions.set( + counters[static_cast(OperatingMode::FULL)].transitions); +} + //------------------------------------------------------------------------------ NetworkOPs::NetworkOPs(Stoppable& parent) diff --git a/src/ripple/app/misc/NetworkOPs.h b/src/ripple/app/misc/NetworkOPs.h index 34da5092b..2e0cb30fe 100644 --- a/src/ripple/app/misc/NetworkOPs.h +++ b/src/ripple/app/misc/NetworkOPs.h @@ -201,6 +201,8 @@ public: virtual void setMode(OperatingMode om) = 0; virtual bool + isBlocked() = 0; + virtual bool isAmendmentBlocked() = 0; virtual void setAmendmentBlocked() = 0; @@ -210,6 +212,12 @@ public: setAmendmentWarned() = 0; virtual void clearAmendmentWarned() = 0; + virtual bool + isUNLBlocked() = 0; + virtual void + setUNLBlocked() = 0; + virtual void + clearUNLBlocked() = 0; virtual void consensusViewChange() = 0; diff --git a/src/ripple/app/misc/ValidatorList.h b/src/ripple/app/misc/ValidatorList.h index 1ec4bf0b3..0365ca88f 100644 --- a/src/ripple/app/misc/ValidatorList.h +++ b/src/ripple/app/misc/ValidatorList.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -34,33 +35,70 @@ #include #include +namespace protocol { +class TMValidatorList; +class TMValidatorListCollection; +} // namespace protocol + namespace ripple { -// predeclaration class Overlay; class HashRouter; +class Message; +class NetworkOPs; +class Peer; class STValidation; +/* Entries in this enum are ordered by "desirability". 
+ The "better" dispositions have lower values than the + "worse" dispositions */ enum class ListDisposition { /// List is valid accepted = 0, + /// List is expired, but has the largest non-pending sequence seen so far + expired, + + /// List will be valid in the future + pending, + /// Same sequence as current list same_sequence, - /// List version is not supported - unsupported_version, - - /// List signed by untrusted publisher key - untrusted, + /// Future sequence already seen + known_sequence, /// Trusted publisher key, but seq is too old stale, + /// List signed by untrusted publisher key + untrusted, + + /// List version is not supported + unsupported_version, + /// Invalid format or signature invalid }; +/* Entries in this enum are ordered by "desirability". + The "better" dispositions have lower values than the + "worse" dispositions */ +enum class PublisherStatus { + // Publisher has provided a valid file + available = 0, + + // Current list is expired without replacement + expired, + + // No file seen yet + unavailable, + + // Publisher has revoked their manifest key + revoked, + +}; + std::string to_string(ListDisposition disposition); @@ -74,6 +112,18 @@ struct TrustChanges hash_set removed; }; +/** Used to represent the information stored in the blobs_v2 Json array */ +struct ValidatorBlobInfo +{ + // base-64 encoded JSON containing the validator list. + std::string blob; + // hex-encoded signature of the blob using the publisher's signing key + std::string signature; + // base-64 or hex-encoded manifest containing the publisher's master and + // signing public keys + boost::optional manifest; +}; + /** Trusted Validators List ----------------------- @@ -90,12 +140,14 @@ struct TrustChanges New lists are expected to include the following data: @li @c "blob": Base64-encoded JSON string containing a @c "sequence", @c - "expiration", and @c "validators" field. 
@c "expiration" contains the - Ripple timestamp (seconds since January 1st, 2000 (00:00 UTC)) for when - the list expires. @c "validators" contains an array of objects with a - @c "validation_public_key" and optional @c "manifest" field. - @c "validation_public_key" should be the hex-encoded master public key. - @c "manifest" should be the base64-encoded validator manifest. + "validFrom", @c "validUntil", and @c "validators" field. @c "validFrom" + contains the Ripple timestamp (seconds since January 1st, 2000 (00:00 + UTC)) for when the list becomes valid. @c "validUntil" contains the + Ripple timestamp for when the list expires. @c "validators" contains + an array of objects with a @c "validation_public_key" and optional + @c "manifest" field. @c "validation_public_key" should be the + hex-encoded master public key. @c "manifest" should be the + base64-encoded validator manifest. @li @c "manifest": Base64-encoded serialization of a manifest containing the publisher's master and signing public keys. @@ -123,32 +175,66 @@ class ValidatorList { explicit PublisherList() = default; - bool available; std::vector list; + std::vector manifests; std::size_t sequence; - TimeKeeper::time_point expiration; + TimeKeeper::time_point validFrom; + TimeKeeper::time_point validUntil; std::string siteUri; - std::string rawManifest; + // base-64 encoded JSON containing the validator list. std::string rawBlob; + // hex-encoded signature of the blob using the publisher's signing key std::string rawSignature; - std::uint32_t rawVersion; + // base-64 or hex-encoded manifest containing the publisher's master and + // signing public keys + boost::optional rawManifest; uint256 hash; }; + struct PublisherListCollection + { + PublisherStatus status; + /* + The `current` VL is the one which + 1. Has the largest sequence number that + 2. Has ever been effective (the effective date is absent or in the + past). 
+ If this VL has expired, all VLs with previous sequence numbers + will also be considered expired, and thus there will be no valid VL + until one with a larger sequence number becomes effective. This is to + prevent allowing old VLs to reactivate. + */ + PublisherList current; + /* + The `remaining` list holds any relevant VLs which have a larger sequence + number than current. By definition they will all have an effective date + in the future. Relevancy will be determined by sorting the VLs by + sequence number, then iterating over the list and removing any VLs for + which the following VL (ignoring gaps) has the same or earlier effective + date. + */ + std::map remaining; + boost::optional maxSequence; + // The hash of the full set if sent in a single message + uint256 fullHash; + std::string rawManifest; + std::uint32_t rawVersion = 0; + }; + ManifestCache& validatorManifests_; ManifestCache& publisherManifests_; TimeKeeper& timeKeeper_; boost::filesystem::path const dataPath_; beast::Journal const j_; boost::shared_mutex mutable mutex_; - using unique_lock = std::unique_lock; - using shared_lock = std::shared_lock; + using lock_guard = std::lock_guard; + using shared_lock = std::shared_lock; std::atomic quorum_; boost::optional minimumQuorum_; // Published lists stored by publisher master public key - hash_map publisherLists_; + hash_map publisherLists_; // Listed master public keys with the number of lists they appear on hash_map keyListings_; @@ -166,8 +252,12 @@ class ValidatorList // The master public keys of the current negative UNL hash_set negativeUNL_; - // Currently supported version of publisher list format - static constexpr std::uint32_t requiredListVersion = 1; + // Currently supported versions of publisher list format + static constexpr std::uint32_t supportedListVersions[]{1, 2}; + // In the initial release, to prevent potential abuse and attacks, any VL + // collection with more than 5 entries will be considered malformed. 
+ static constexpr std::size_t maxSupportedBlobs = 5; + // Prefix of the file name used to store cache files. static const std::string filePrefix_; public: @@ -187,35 +277,51 @@ public: */ struct PublisherListStats { - explicit PublisherListStats(ListDisposition d) : disposition(d) - { - } - + explicit PublisherListStats() = default; + explicit PublisherListStats(ListDisposition d); PublisherListStats( ListDisposition d, PublicKey key, - bool avail, - std::size_t seq) - : disposition(d), publisherKey(key), available(avail), sequence(seq) - { - } + PublisherStatus stat, + std::size_t seq); - ListDisposition disposition; + ListDisposition + bestDisposition() const; + ListDisposition + worstDisposition() const; + void + mergeDispositions(PublisherListStats const& src); + + // Tracks the dispositions of each processed list and how many times it + // occurred + std::map dispositions; boost::optional publisherKey; - bool available = false; - boost::optional sequence; + PublisherStatus status = PublisherStatus::unavailable; + std::size_t sequence = 0; + }; + + struct MessageWithHash + { + explicit MessageWithHash() = default; + explicit MessageWithHash( + std::shared_ptr const& message_, + uint256 hash_, + std::size_t num_); + std::shared_ptr message; + uint256 hash; + std::size_t numVLs = 0; }; /** Load configured trusted keys. @param localSigningKey This node's validation public key - @param configKeys List of trusted keys from config. Each entry consists - of a base58 encoded validation public key, optionally followed by a - comment. + @param configKeys List of trusted keys from config. Each entry + consists of a base58 encoded validation public key, optionally followed + by a comment. - @param publisherKeys List of trusted publisher public keys. Each entry - contains a base58 encoded account public key. + @param publisherKeys List of trusted publisher public keys. Each + entry contains a base58 encoded account public key. 
@par Thread Safety @@ -229,17 +335,53 @@ public: std::vector const& configKeys, std::vector const& publisherKeys); - /** Apply published list of public keys, then broadcast it to all + /** Pull the blob/signature/manifest information out of the appropriate Json + body fields depending on the version. + + @return An empty vector indicates malformed Json. + */ + static std::vector + parseBlobs(std::uint32_t version, Json::Value const& body); + + static std::vector + parseBlobs(protocol::TMValidatorList const& body); + + static std::vector + parseBlobs(protocol::TMValidatorListCollection const& body); + + static void + sendValidatorList( + Peer& peer, + std::uint64_t peerSequence, + PublicKey const& publisherKey, + std::size_t maxSequence, + std::uint32_t rawVersion, + std::string const& rawManifest, + std::map const& blobInfos, + HashRouter& hashRouter, + beast::Journal j); + + [[nodiscard]] static std::pair + buildValidatorListMessages( + std::size_t messageVersion, + std::uint64_t peerSequence, + std::size_t maxSequence, + std::uint32_t rawVersion, + std::string const& rawManifest, + std::map const& blobInfos, + std::vector& messages, + std::size_t maxSize = maximiumMessageSize); + + /** Apply multiple published lists of public keys, then broadcast it to all peers that have not seen it or sent it. 
@param manifest base64-encoded publisher key manifest - @param blob base64-encoded json containing published validator list - - @param signature Signature of the decoded blob - @param version Version of published list format + @param blobs Vector of BlobInfos representing one or more encoded + validator lists and signatures (and optional manifests) + @param siteUri Uri of the site from which the list was validated @param hash Hash of the data parameters @@ -249,6 +391,9 @@ public: @param hashRouter HashRouter object which will determine which peers not to send to + @param networkOPs NetworkOPs object which will be informed if there + is a valid VL + @return `ListDisposition::accepted`, plus some of the publisher information, if list was successfully applied @@ -257,44 +402,41 @@ public: May be called concurrently */ PublisherListStats - applyListAndBroadcast( + applyListsAndBroadcast( std::string const& manifest, - std::string const& blob, - std::string const& signature, std::uint32_t version, + std::vector const& blobs, std::string siteUri, uint256 const& hash, Overlay& overlay, - HashRouter& hashRouter); + HashRouter& hashRouter, + NetworkOPs& networkOPs); - /** Apply published list of public keys + /** Apply multiple published lists of public keys. @param manifest base64-encoded publisher key manifest - @param blob base64-encoded json containing published validator list - - @param signature Signature of the decoded blob - @param version Version of published list format + @param blobs Vector of BlobInfos representing one or more encoded + validator lists and signatures (and optional manifests) + @param siteUri Uri of the site from which the list was validated - @param hash Optional hash of the data parameters. 
- Defaults to uninitialized + @param hash Optional hash of the data parameters @return `ListDisposition::accepted`, plus some of the publisher - information, if list was successfully applied + information, if list was successfully applied @par Thread Safety May be called concurrently */ PublisherListStats - applyList( + applyLists( std::string const& manifest, - std::string const& blob, - std::string const& signature, std::uint32_t version, + std::vector const& blobs, std::string siteUri, boost::optional const& hash = {}); @@ -326,7 +468,12 @@ public: May be called concurrently */ TrustChanges - updateTrusted(hash_set const& seenValidators); + updateTrusted( + hash_set const& seenValidators, + NetClock::time_point closeTime, + NetworkOPs& ops, + Overlay& overlay, + HashRouter& hashRouter); /** Get quorum value for current trusted key set @@ -461,20 +608,22 @@ public: May be called concurrently */ void - for_each_available(std::function func) const; + for_each_available( + std::function const& blobInfos, + PublicKey const& pubKey, + std::size_t maxSequence, + uint256 const& hash)> func) const; /** Returns the current valid list for the given publisher key, if available, as a Json object. */ boost::optional - getAvailable(boost::beast::string_view const& pubKey); + getAvailable( + boost::beast::string_view const& pubKey, + boost::optional forceVersion = {}); /** Return the number of configured validator list sites. */ std::size_t @@ -483,7 +632,7 @@ public: /** Return the time when the validator list will expire @note This may be a time in the past if a published list has not - been updated since its expiration. It will be boost::none if any + been updated since its validUntil. It will be boost::none if any configured published list has not been fetched. 
@par Thread Safety @@ -584,18 +733,113 @@ private: boost::optional expires(shared_lock const&) const; + /** Apply published list of public keys + + @param manifest base64-encoded publisher key manifest + + @param blob base64-encoded json containing published validator list + + @param signature Signature of the decoded blob + + @param version Version of published list format + + @param siteUri Uri of the site from which the list was validated + + @param hash Optional hash of the data parameters. + Defaults to uninitialized + + @return `ListDisposition::accepted`, plus some of the publisher + information, if list was successfully applied + + @par Thread Safety + + May be called concurrently + */ + PublisherListStats + applyList( + std::string const& globalManifest, + boost::optional const& localManifest, + std::string const& blob, + std::string const& signature, + std::uint32_t version, + std::string siteUri, + boost::optional const& hash, + lock_guard const&); + + void + updatePublisherList( + PublicKey const& pubKey, + PublisherList const& current, + std::vector const& oldList, + lock_guard const&); + + static void + buildBlobInfos( + std::map& blobInfos, + PublisherListCollection const& lists); + + static std::map + buildBlobInfos(PublisherListCollection const& lists); + + static void + broadcastBlobs( + PublicKey const& publisherKey, + PublisherListCollection const& lists, + std::size_t maxSequence, + uint256 const& hash, + Overlay& overlay, + HashRouter& hashRouter, + beast::Journal j); + + static void + sendValidatorList( + Peer& peer, + std::uint64_t peerSequence, + PublicKey const& publisherKey, + std::size_t maxSequence, + std::uint32_t rawVersion, + std::string const& rawManifest, + std::map const& blobInfos, + std::vector& messages, + HashRouter& hashRouter, + beast::Journal j); + /** Get the filename used for caching UNLs */ boost::filesystem::path - GetCacheFileName(unique_lock const&, PublicKey const& pubKey); + getCacheFileName(lock_guard const&, 
PublicKey const& pubKey) const; + + /** Build a Json representation of the collection, suitable for + writing to a cache file, or serving to a /vl/ query + */ + static Json::Value + buildFileData( + std::string const& pubKey, + PublisherListCollection const& pubCollection, + beast::Journal j); + + /** Build a Json representation of the collection, suitable for + writing to a cache file, or serving to a /vl/ query + */ + static Json::Value + buildFileData( + std::string const& pubKey, + PublisherListCollection const& pubCollection, + boost::optional forceVersion, + beast::Journal j); + + template + friend void + hash_append(Hasher& h, PublisherListCollection pl) + { + using beast::hash_append; + hash_append(h, pl.rawManifest, buildBlobInfos(pl), pl.rawVersion); + } /** Write a JSON UNL to a cache file */ void - CacheValidatorFile( - unique_lock const& lock, - PublicKey const& pubKey, - PublisherList const& publisher); + cacheValidatorFile(lock_guard const& lock, PublicKey const& pubKey) const; /** Check response for trusted valid published list @@ -607,7 +851,7 @@ private: */ ListDisposition verify( - unique_lock const&, + lock_guard const&, Json::Value& list, PublicKey& pubKey, std::string const& manifest, @@ -625,7 +869,10 @@ private: Calling public member function is expected to lock mutex */ bool - removePublisherList(unique_lock const&, PublicKey const& publisherKey); + removePublisherList( + lock_guard const&, + PublicKey const& publisherKey, + PublisherStatus reason); /** Return quorum for trusted validator set @@ -643,6 +890,63 @@ private: std::size_t effectiveUnlSize, std::size_t seenSize); }; + +// hashing helpers +template +void +hash_append(Hasher& h, ValidatorBlobInfo const& blobInfo) +{ + using beast::hash_append; + hash_append(h, blobInfo.blob, blobInfo.signature); + if (blobInfo.manifest) + { + hash_append(h, *blobInfo.manifest); + } +} + +template +void +hash_append(Hasher& h, std::vector const& blobs) +{ + for (auto const& item : blobs) + 
hash_append(h, item); +} + +template +void +hash_append(Hasher& h, std::map const& blobs) +{ + for (auto const& [_, item] : blobs) + { + (void)_; + hash_append(h, item); + } +} + } // namespace ripple +namespace protocol { + +template +void +hash_append(Hasher& h, TMValidatorList const& msg) +{ + using beast::hash_append; + hash_append(h, msg.manifest(), msg.blob(), msg.signature(), msg.version()); +} + +template +void +hash_append(Hasher& h, TMValidatorListCollection const& msg) +{ + using beast::hash_append; + hash_append( + h, + msg.manifest(), + ripple::ValidatorList::parseBlobs(msg), + msg.version()); +} + +} // namespace protocol + #endif diff --git a/src/ripple/app/misc/ValidatorSite.h b/src/ripple/app/misc/ValidatorSite.h index d77cfb087..785b3437d 100644 --- a/src/ripple/app/misc/ValidatorSite.h +++ b/src/ripple/app/misc/ValidatorSite.h @@ -48,7 +48,7 @@ namespace ripple { fields: @li @c "blob": Base64-encoded JSON string containing a @c "sequence", @c - "expiration", and @c "validators" field. @c "expiration" contains the + "validUntil", and @c "validators" field. @c "validUntil" contains the Ripple timestamp (seconds since January 1st, 2000 (00:00 UTC)) for when the list expires. @c "validators" contains an array of objects with a @c "validation_public_key" and optional @c "manifest" field. 
diff --git a/src/ripple/app/misc/impl/ValidatorList.cpp b/src/ripple/app/misc/impl/ValidatorList.cpp index 485452e5f..8105acbad 100644 --- a/src/ripple/app/misc/impl/ValidatorList.cpp +++ b/src/ripple/app/misc/impl/ValidatorList.cpp @@ -18,6 +18,7 @@ //============================================================================== #include +#include #include #include #include @@ -26,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -34,6 +36,7 @@ #include #include +#include #include namespace ripple { @@ -45,8 +48,14 @@ to_string(ListDisposition disposition) { case ListDisposition::accepted: return "accepted"; + case ListDisposition::expired: + return "expired"; case ListDisposition::same_sequence: return "same_sequence"; + case ListDisposition::pending: + return "pending"; + case ListDisposition::known_sequence: + return "known_sequence"; case ListDisposition::unsupported_version: return "unsupported_version"; case ListDisposition::untrusted: @@ -59,6 +68,53 @@ to_string(ListDisposition disposition) return "unknown"; } +ValidatorList::PublisherListStats::PublisherListStats(ListDisposition d) +{ + ++dispositions[d]; +} + +ValidatorList::PublisherListStats::PublisherListStats( + ListDisposition d, + PublicKey key, + PublisherStatus stat, + std::size_t seq) + : publisherKey(key), status(stat), sequence(seq) +{ + ++dispositions[d]; +} + +ListDisposition +ValidatorList::PublisherListStats::bestDisposition() const +{ + return dispositions.empty() ? ListDisposition::invalid + : dispositions.begin()->first; +} + +ListDisposition +ValidatorList::PublisherListStats::worstDisposition() const +{ + return dispositions.empty() ? 
ListDisposition::invalid + : dispositions.rbegin()->first; +} + +void +ValidatorList::PublisherListStats::mergeDispositions( + PublisherListStats const& src) +{ + for (auto const [disp, count] : src.dispositions) + { + dispositions[disp] += count; + } +} + +ValidatorList::MessageWithHash::MessageWithHash( + std::shared_ptr const& message_, + uint256 hash_, + std::size_t num_) + : message(message_), hash(hash_), numVLs(num_) +{ +} + const std::string ValidatorList::filePrefix_ = "cache."; ValidatorList::ValidatorList( @@ -96,7 +152,7 @@ ValidatorList::load( ")?" // end optional comment block ); - std::unique_lock lock{mutex_}; + std::lock_guard lock{mutex_}; JLOG(j_.debug()) << "Loading configured trusted validator list publisher keys"; @@ -115,13 +171,14 @@ ValidatorList::load( } auto id = PublicKey(makeSlice(*ret)); + auto status = PublisherStatus::unavailable; if (publisherManifests_.revoked(id)) { JLOG(j_.warn()) << "Configured validator list publisher key is revoked: " << key; - continue; + status = PublisherStatus::revoked; } if (publisherLists_.count(id)) @@ -131,7 +188,7 @@ ValidatorList::load( continue; } - publisherLists_[id].available = false; + publisherLists_[id].status = status; ++count; } @@ -146,7 +203,6 @@ ValidatorList::load( JLOG(j_.debug()) << "Loading configured validator keys"; count = 0; - PublicKey local; for (auto const& n : configKeys) { JLOG(j_.trace()) << "Processing '" << n << "'"; @@ -177,15 +233,13 @@ ValidatorList::load( JLOG(j_.warn()) << "Duplicate node identity: " << match[1]; continue; } - auto it = publisherLists_.emplace( - std::piecewise_construct, - std::forward_as_tuple(local), - std::forward_as_tuple()); + auto [it, inserted] = publisherLists_.emplace(); // Config listed keys never expire - if (it.second) - it.first->second.expiration = TimeKeeper::time_point::max(); - it.first->second.list.emplace_back(*id); - it.first->second.available = true; + auto& current = it->second.current; + if (inserted) + current.validUntil = 
TimeKeeper::time_point::max(); + current.list.emplace_back(*id); + it->second.status = PublisherStatus::available; ++count; } @@ -195,32 +249,105 @@ ValidatorList::load( } boost::filesystem::path -ValidatorList::GetCacheFileName( - ValidatorList::unique_lock const&, - PublicKey const& pubKey) +ValidatorList::getCacheFileName( + ValidatorList::lock_guard const&, + PublicKey const& pubKey) const { return dataPath_ / (filePrefix_ + strHex(pubKey)); } +// static +Json::Value +ValidatorList::buildFileData( + std::string const& pubKey, + ValidatorList::PublisherListCollection const& pubCollection, + beast::Journal j) +{ + return buildFileData(pubKey, pubCollection, {}, j); +} + +// static +Json::Value +ValidatorList::buildFileData( + std::string const& pubKey, + ValidatorList::PublisherListCollection const& pubCollection, + boost::optional forceVersion, + beast::Journal j) +{ + Json::Value value(Json::objectValue); + + assert(pubCollection.rawVersion == 2 || pubCollection.remaining.empty()); + auto const effectiveVersion = + forceVersion ? *forceVersion : pubCollection.rawVersion; + + value[jss::manifest] = pubCollection.rawManifest; + value[jss::version] = effectiveVersion; + value[jss::public_key] = pubKey; + + switch (effectiveVersion) + { + case 1: { + auto const& current = pubCollection.current; + value[jss::blob] = current.rawBlob; + value[jss::signature] = current.rawSignature; + // This is only possible if "downgrading" a v2 UNL to v1, for + // example for the /vl/ endpoint. 
+ if (current.rawManifest && + *current.rawManifest != pubCollection.rawManifest) + value[jss::manifest] = *current.rawManifest; + break; + } + case 2: { + Json::Value blobs(Json::arrayValue); + + auto add = [&blobs, &outerManifest = pubCollection.rawManifest]( + PublisherList const& pubList) { + auto& blob = blobs.append(Json::objectValue); + blob[jss::blob] = pubList.rawBlob; + blob[jss::signature] = pubList.rawSignature; + if (pubList.rawManifest && + *pubList.rawManifest != outerManifest) + blob[jss::manifest] = *pubList.rawManifest; + }; + + add(pubCollection.current); + for (auto const& [_, pending] : pubCollection.remaining) + { + (void)_; + add(pending); + } + + value[jss::blobs_v2] = std::move(blobs); + break; + } + default: + JLOG(j.trace()) + << "Invalid VL version provided: " << effectiveVersion; + value = Json::nullValue; + } + + return value; +} + void -ValidatorList::CacheValidatorFile( - ValidatorList::unique_lock const& lock, - PublicKey const& pubKey, - PublisherList const& publisher) +ValidatorList::cacheValidatorFile( + ValidatorList::lock_guard const& lock, + PublicKey const& pubKey) const { if (dataPath_.empty()) return; - boost::filesystem::path const filename = GetCacheFileName(lock, pubKey); + boost::filesystem::path const filename = getCacheFileName(lock, pubKey); boost::system::error_code ec; - Json::Value value(Json::objectValue); - - value["manifest"] = publisher.rawManifest; - value["blob"] = publisher.rawBlob; - value["signature"] = publisher.rawSignature; - value["version"] = publisher.rawVersion; + Json::Value value = + buildFileData(strHex(pubKey), publisherLists_.at(pubKey), j_); + // rippled should be the only process writing to this file, so + // if it ever needs to be read, it is not expected to change externally, so + // delay the refresh as long as possible: 24 hours. 
(See also + // `ValidatorSite::missingSite()`) + value[jss::refresh_interval] = 24 * 60; writeFileContents(ec, filename, value.toStyledString()); @@ -232,152 +359,651 @@ ValidatorList::CacheValidatorFile( } } -ValidatorList::PublisherListStats -ValidatorList::applyListAndBroadcast( - std::string const& manifest, - std::string const& blob, - std::string const& signature, - std::uint32_t version, - std::string siteUri, +// static +std::vector +ValidatorList::parseBlobs(std::uint32_t version, Json::Value const& body) +{ + std::vector result; + switch (version) + { + case 1: { + if (!body.isMember(jss::blob) || !body[jss::blob].isString() || + !body.isMember(jss::signature) || + !body[jss::signature].isString() || + // If the v2 field is present, the VL is malformed + body.isMember(jss::blobs_v2)) + return {}; + ValidatorBlobInfo& info = result.emplace_back(); + info.blob = body[jss::blob].asString(); + info.signature = body[jss::signature].asString(); + assert(result.size() == 1); + return result; + } + // Treat unknown versions as if they're the latest version. This + // will likely break a bunch of unit tests each time we introduce a + // new version, so don't do it casually. Note that the version is + // validated elsewhere. 
+ case 2: + default: { + if (!body.isMember(jss::blobs_v2) || + !body[jss::blobs_v2].isArray() || + body[jss::blobs_v2].size() > maxSupportedBlobs || + // If any of the v1 fields are present, the VL is malformed + body.isMember(jss::blob) || body.isMember(jss::signature)) + return {}; + auto const& blobs = body[jss::blobs_v2]; + result.reserve(blobs.size()); + for (auto const& blobInfo : blobs) + { + if (!blobInfo.isObject() || + !blobInfo.isMember(jss::signature) || + !blobInfo[jss::signature].isString() || + !blobInfo.isMember(jss::blob) || + !blobInfo[jss::blob].isString()) + return {}; + ValidatorBlobInfo& info = result.emplace_back(); + info.blob = blobInfo[jss::blob].asString(); + info.signature = blobInfo[jss::signature].asString(); + if (blobInfo.isMember(jss::manifest)) + { + if (!blobInfo[jss::manifest].isString()) + return {}; + info.manifest = blobInfo[jss::manifest].asString(); + } + } + assert(result.size() == blobs.size()); + return result; + } + } +} + +// static +std::vector +ValidatorList::parseBlobs(protocol::TMValidatorList const& body) +{ + return {{body.blob(), body.signature(), {}}}; +} + +// static +std::vector +ValidatorList::parseBlobs(protocol::TMValidatorListCollection const& body) +{ + if (body.blobs_size() > maxSupportedBlobs) + return {}; + std::vector result; + result.reserve(body.blobs_size()); + for (auto const& blob : body.blobs()) + { + ValidatorBlobInfo& info = result.emplace_back(); + info.blob = blob.blob(); + info.signature = blob.signature(); + if (blob.has_manifest()) + { + info.manifest = blob.manifest(); + } + } + assert(result.size() == body.blobs_size()); + return result; +} + +std::size_t +splitMessageParts( + std::vector& messages, + protocol::TMValidatorListCollection const& largeMsg, + std::size_t maxSize, + std::size_t begin, + std::size_t end); + +std::size_t +splitMessage( + std::vector& messages, + protocol::TMValidatorListCollection const& largeMsg, + std::size_t maxSize, + std::size_t begin = 0, + std::size_t 
end = 0) +{ + if (begin == 0 && end == 0) + end = largeMsg.blobs_size(); + assert(begin < end); + if (end <= begin) + return 0; + + auto mid = (begin + end) / 2; + // The parts function will do range checking + // Use two separate calls to ensure deterministic order + auto result = splitMessageParts(messages, largeMsg, maxSize, begin, mid); + return result + splitMessageParts(messages, largeMsg, maxSize, mid, end); +} + +std::size_t +splitMessageParts( + std::vector& messages, + protocol::TMValidatorListCollection const& largeMsg, + std::size_t maxSize, + std::size_t begin, + std::size_t end) +{ + if (end <= begin) + return 0; + if (end - begin == 1) + { + protocol::TMValidatorList smallMsg; + smallMsg.set_version(1); + smallMsg.set_manifest(largeMsg.manifest()); + + auto const& blob = largeMsg.blobs(begin); + smallMsg.set_blob(blob.blob()); + smallMsg.set_signature(blob.signature()); + // This is only possible if "downgrading" a v2 UNL to v1. + if (blob.has_manifest()) + smallMsg.set_manifest(blob.manifest()); + + assert(Message::totalSize(smallMsg) <= maximiumMessageSize); + + messages.emplace_back( + std::make_shared(smallMsg, protocol::mtVALIDATORLIST), + sha512Half(smallMsg), + 1); + return messages.back().numVLs; + } + else + { + boost::optional smallMsg; + smallMsg.emplace(); + smallMsg->set_version(largeMsg.version()); + smallMsg->set_manifest(largeMsg.manifest()); + + for (std::size_t i = begin; i < end; ++i) + { + *smallMsg->add_blobs() = largeMsg.blobs(i); + } + + if (Message::totalSize(*smallMsg) > maxSize) + { + // free up the message space + smallMsg.reset(); + return splitMessage(messages, largeMsg, maxSize, begin, end); + } + else + { + messages.emplace_back( + std::make_shared( + *smallMsg, protocol::mtVALIDATORLISTCOLLECTION), + sha512Half(*smallMsg), + smallMsg->blobs_size()); + return messages.back().numVLs; + } + } + return 0; +} + +// Build a v1 protocol message using only the current VL +std::size_t +buildValidatorListMessage( + std::vector& 
messages, + std::uint32_t rawVersion, + std::string const& rawManifest, + ValidatorBlobInfo const& currentBlob, + std::size_t maxSize) +{ + assert(messages.empty()); + protocol::TMValidatorList msg; + auto const manifest = + currentBlob.manifest ? *currentBlob.manifest : rawManifest; + auto const version = 1; + msg.set_manifest(manifest); + msg.set_blob(currentBlob.blob); + msg.set_signature(currentBlob.signature); + // Override the version + msg.set_version(version); + + assert(Message::totalSize(msg) <= maximiumMessageSize); + messages.emplace_back( + std::make_shared(msg, protocol::mtVALIDATORLIST), + sha512Half(msg), + 1); + return 1; +} + +// Build a v2 protocol message using all the VLs with sequence larger than the +// peer's +std::size_t +buildValidatorListMessage( + std::vector& messages, + std::uint64_t peerSequence, + std::uint32_t rawVersion, + std::string const& rawManifest, + std::map const& blobInfos, + std::size_t maxSize) +{ + assert(messages.empty()); + protocol::TMValidatorListCollection msg; + auto const version = rawVersion < 2 ? 
2 : rawVersion; + msg.set_version(version); + msg.set_manifest(rawManifest); + + for (auto const& [sequence, blobInfo] : blobInfos) + { + if (sequence <= peerSequence) + continue; + protocol::ValidatorBlobInfo& blob = *msg.add_blobs(); + blob.set_blob(blobInfo.blob); + blob.set_signature(blobInfo.signature); + if (blobInfo.manifest) + blob.set_manifest(*blobInfo.manifest); + } + assert(msg.blobs_size() > 0); + if (Message::totalSize(msg) > maxSize) + { + // split into smaller messages + return splitMessage(messages, msg, maxSize); + } + else + { + messages.emplace_back( + std::make_shared(msg, protocol::mtVALIDATORLISTCOLLECTION), + sha512Half(msg), + msg.blobs_size()); + return messages.back().numVLs; + } +} + +[[nodiscard]] +// static +std::pair +ValidatorList::buildValidatorListMessages( + std::size_t messageVersion, + std::uint64_t peerSequence, + std::size_t maxSequence, + std::uint32_t rawVersion, + std::string const& rawManifest, + std::map const& blobInfos, + std::vector& messages, + std::size_t maxSize /*= maximiumMessageSize*/) +{ + assert(!blobInfos.empty()); + auto const& [currentSeq, currentBlob] = *blobInfos.begin(); + auto numVLs = std::accumulate( + messages.begin(), + messages.end(), + 0, + [](std::size_t total, MessageWithHash const& m) { + return total + m.numVLs; + }); + if (messageVersion == 2 && peerSequence < maxSequence) + { + // Version 2 + if (messages.empty()) + { + numVLs = buildValidatorListMessage( + messages, + peerSequence, + rawVersion, + rawManifest, + blobInfos, + maxSize); + if (messages.empty()) + // No message was generated. Create an empty placeholder so we + // dont' repeat the work later. + messages.emplace_back(); + } + + // Don't send it next time. + return {maxSequence, numVLs}; + } + else if (messageVersion == 1 && peerSequence < currentSeq) + { + // Version 1 + if (messages.empty()) + { + numVLs = buildValidatorListMessage( + messages, + rawVersion, + currentBlob.manifest ? 
*currentBlob.manifest : rawManifest, + currentBlob, + maxSize); + if (messages.empty()) + // No message was generated. Create an empty placeholder so we + // dont' repeat the work later. + messages.emplace_back(); + } + + // Don't send it next time. + return {currentSeq, numVLs}; + } + return {0, 0}; +} + +// static +void +ValidatorList::sendValidatorList( + Peer& peer, + std::uint64_t peerSequence, + PublicKey const& publisherKey, + std::size_t maxSequence, + std::uint32_t rawVersion, + std::string const& rawManifest, + std::map const& blobInfos, + std::vector& messages, + HashRouter& hashRouter, + beast::Journal j) +{ + std::size_t const messageVersion = + peer.supportsFeature(ProtocolFeature::ValidatorList2Propagation) + ? 2 + : peer.supportsFeature(ProtocolFeature::ValidatorListPropagation) ? 1 + : 0; + if (!messageVersion) + return; + auto const [newPeerSequence, numVLs] = buildValidatorListMessages( + messageVersion, + peerSequence, + maxSequence, + rawVersion, + rawManifest, + blobInfos, + messages); + if (newPeerSequence) + { + assert(!messages.empty()); + // Don't send it next time. 
+ peer.setPublisherListSequence(publisherKey, newPeerSequence); + + bool sent = false; + for (auto const& message : messages) + { + if (message.message) + { + peer.send(message.message); + hashRouter.addSuppressionPeer(message.hash, peer.id()); + sent = true; + } + } + // The only way sent wil be false is if the messages was too big, and + // thus there will only be one entry without a message + assert(sent || messages.size() == 1); + if (sent) + { + if (messageVersion > 1) + JLOG(j.debug()) + << "Sent " << messages.size() + << " validator list collection(s) containing " << numVLs + << " validator list(s) for " << strHex(publisherKey) + << " with sequence range " << peerSequence << ", " + << newPeerSequence << " to " + << peer.getRemoteAddress().to_string() << " [" << peer.id() + << "]"; + else + { + assert(numVLs == 1); + JLOG(j.debug()) + << "Sent validator list for " << strHex(publisherKey) + << " with sequence " << newPeerSequence << " to " + << peer.getRemoteAddress().to_string() << " [" << peer.id() + << "]"; + } + } + } +} + +// static +void +ValidatorList::sendValidatorList( + Peer& peer, + std::uint64_t peerSequence, + PublicKey const& publisherKey, + std::size_t maxSequence, + std::uint32_t rawVersion, + std::string const& rawManifest, + std::map const& blobInfos, + HashRouter& hashRouter, + beast::Journal j) +{ + std::vector messages; + sendValidatorList( + peer, + peerSequence, + publisherKey, + maxSequence, + rawVersion, + rawManifest, + blobInfos, + messages, + hashRouter, + j); +} + +// static +void +ValidatorList::buildBlobInfos( + std::map& blobInfos, + ValidatorList::PublisherListCollection const& lists) +{ + auto const& current = lists.current; + auto const& remaining = lists.remaining; + blobInfos[current.sequence] = { + current.rawBlob, current.rawSignature, current.rawManifest}; + for (auto const& [sequence, vl] : remaining) + { + blobInfos[sequence] = {vl.rawBlob, vl.rawSignature, vl.rawManifest}; + } +} + +// static +std::map 
+ValidatorList::buildBlobInfos( + ValidatorList::PublisherListCollection const& lists) +{ + std::map result; + buildBlobInfos(result, lists); + return result; +} + +// static +void +ValidatorList::broadcastBlobs( + PublicKey const& publisherKey, + ValidatorList::PublisherListCollection const& lists, + std::size_t maxSequence, uint256 const& hash, Overlay& overlay, - HashRouter& hashRouter) + HashRouter& hashRouter, + beast::Journal j) { - auto const result = - applyList(manifest, blob, signature, version, std::move(siteUri), hash); - auto const disposition = result.disposition; + auto const toSkip = hashRouter.shouldRelay(hash); - bool broadcast = disposition == ListDisposition::accepted || - disposition == ListDisposition::same_sequence; - - if (broadcast) + if (toSkip) { - assert(result.available && result.publisherKey && result.sequence); - auto const toSkip = hashRouter.shouldRelay(hash); + // We don't know what messages or message versions we're sending + // until we examine our peer's properties. Build the message(s) on + // demand, but reuse them when possible. - if (toSkip) + // This will hold a v1 message with only the current VL if we have + // any peers that don't support v2 + std::vector messages1; + // This will hold v2 messages indexed by the peer's + // `publisherListSequence`. For each `publisherListSequence`, we'll + // only send the VLs with higher sequences. + std::map> + messages2; + // If any peers are found that are worth considering, this list will + // be built to hold info for all of the valid VLs. 
+ std::map blobInfos; + + assert( + lists.current.sequence == maxSequence || + lists.remaining.count(maxSequence) == 1); + // Can't use overlay.foreach here because we need to modify + // the peer, and foreach provides a const& + for (auto& peer : overlay.getActivePeers()) { - protocol::TMValidatorList msg; - msg.set_manifest(manifest); - msg.set_blob(blob); - msg.set_signature(signature); - msg.set_version(version); - - auto const& publisherKey = *result.publisherKey; - auto const sequence = *result.sequence; - - // Can't use overlay.foreach here because we need to modify - // the peer, and foreach provides a const& - auto message = - std::make_shared(msg, protocol::mtVALIDATORLIST); - for (auto& peer : overlay.getActivePeers()) + if (toSkip->count(peer->id()) == 0) { - if (toSkip->count(peer->id()) == 0 && - peer->supportsFeature( - ProtocolFeature::ValidatorListPropagation) && - peer->publisherListSequence(publisherKey) < sequence) + auto const peerSequence = + peer->publisherListSequence(publisherKey).value_or(0); + if (peerSequence < maxSequence) { - peer->send(message); - - JLOG(j_.debug()) - << "Sent validator list for " << strHex(publisherKey) - << " with sequence " << sequence << " to " - << peer->getRemoteAddress().to_string() << " (" - << peer->id() << ")"; - // Don't send it next time. + if (blobInfos.empty()) + buildBlobInfos(blobInfos, lists); + auto const v2 = peer->supportsFeature( + ProtocolFeature::ValidatorList2Propagation); + sendValidatorList( + *peer, + peerSequence, + publisherKey, + maxSequence, + lists.rawVersion, + lists.rawManifest, + blobInfos, + v2 ? messages2[peerSequence] : messages1, + hashRouter, + j); + // Even if the peer doesn't support the messages, + // suppress it so it'll be ignored next time. 
hashRouter.addSuppressionPeer(hash, peer->id()); - peer->setPublisherListSequence(publisherKey, sequence); } } } } +} + +ValidatorList::PublisherListStats +ValidatorList::applyListsAndBroadcast( + std::string const& manifest, + std::uint32_t version, + std::vector const& blobs, + std::string siteUri, + uint256 const& hash, + Overlay& overlay, + HashRouter& hashRouter, + NetworkOPs& networkOPs) +{ + auto const result = + applyLists(manifest, version, blobs, std::move(siteUri), hash); + auto const disposition = result.bestDisposition(); + + if (disposition == ListDisposition::accepted) + { + bool good = true; + for (auto const& [pubKey, listCollection] : publisherLists_) + { + (void)pubKey; + if (listCollection.status != PublisherStatus::available) + { + good = false; + break; + } + } + if (good) + { + networkOPs.clearUNLBlocked(); + } + } + bool broadcast = disposition <= ListDisposition::known_sequence; + + if (broadcast) + { + auto const& pubCollection = publisherLists_[*result.publisherKey]; + assert( + result.status <= PublisherStatus::expired && result.publisherKey && + pubCollection.maxSequence); + broadcastBlobs( + *result.publisherKey, + pubCollection, + *pubCollection.maxSequence, + hash, + overlay, + hashRouter, + j_); + } return result; } ValidatorList::PublisherListStats -ValidatorList::applyList( +ValidatorList::applyLists( std::string const& manifest, - std::string const& blob, - std::string const& signature, std::uint32_t version, + std::vector const& blobs, std::string siteUri, - boost::optional const& hash) + boost::optional const& hash /* = {} */) { - using namespace std::string_literals; - - if (version != requiredListVersion) + if (std::count( + std::begin(supportedListVersions), + std::end(supportedListVersions), + version) != 1) return PublisherListStats{ListDisposition::unsupported_version}; - std::unique_lock lock{mutex_}; + std::lock_guard lock{mutex_}; - Json::Value list; - PublicKey pubKey; - auto const result = verify(lock, list, pubKey, 
manifest, blob, signature); - if (result != ListDisposition::accepted) + PublisherListStats result; + for (auto const& blobInfo : blobs) { - if (result == ListDisposition::same_sequence && - publisherLists_.count(pubKey)) + auto stats = applyList( + manifest, + blobInfo.manifest, + blobInfo.blob, + blobInfo.signature, + version, + siteUri, + hash, + lock); + + if (stats.bestDisposition() < result.bestDisposition() || + (stats.bestDisposition() == result.bestDisposition() && + stats.sequence > result.sequence)) { - // We've seen this valid list already, so return - // what we know about it. - auto const& publisher = publisherLists_[pubKey]; - return PublisherListStats{ - result, pubKey, publisher.available, publisher.sequence}; + stats.mergeDispositions(result); + result = std::move(stats); } - return PublisherListStats{result}; + else + result.mergeDispositions(stats); + ///////// } - // Update publisher's list - Json::Value const& newList = list["validators"]; - auto& publisher = publisherLists_[pubKey]; - publisher.available = true; - publisher.sequence = list["sequence"].asUInt(); - publisher.expiration = TimeKeeper::time_point{ - TimeKeeper::duration{list["expiration"].asUInt()}}; - publisher.siteUri = std::move(siteUri); - publisher.rawManifest = manifest; - publisher.rawBlob = blob; - publisher.rawSignature = signature; - publisher.rawVersion = version; - if (hash) - publisher.hash = *hash; - std::vector& publisherList = publisher.list; - - PublisherListStats const applyResult{ - result, pubKey, publisher.available, publisher.sequence}; - - std::vector oldList = publisherList; - publisherList.clear(); - publisherList.reserve(newList.size()); - std::vector manifests; - for (auto const& val : newList) + // Clean up the collection, because some of the processing may have made it + // inconsistent + if (result.publisherKey && publisherLists_.count(*result.publisherKey)) { - if (val.isObject() && val.isMember("validation_public_key") && - 
val["validation_public_key"].isString()) + auto& pubCollection = publisherLists_[*result.publisherKey]; + auto& remaining = pubCollection.remaining; + auto const& current = pubCollection.current; + for (auto iter = remaining.begin(); iter != remaining.end();) { - boost::optional const ret = - strUnHex(val["validation_public_key"].asString()); - - if (!ret || !publicKeyType(makeSlice(*ret))) + auto next = std::next(iter); + assert(next == remaining.end() || next->first > iter->first); + if (iter->first <= current.sequence || + (next != remaining.end() && + next->second.validFrom <= iter->second.validFrom)) { - JLOG(j_.error()) << "Invalid node identity: " - << val["validation_public_key"].asString(); + iter = remaining.erase(iter); } else { - publisherList.push_back( - PublicKey(Slice{ret->data(), ret->size()})); + iter = next; } - - if (val.isMember("manifest") && val["manifest"].isString()) - manifests.push_back(val["manifest"].asString()); } + + cacheValidatorFile(lock, *result.publisherKey); + + pubCollection.fullHash = sha512Half(pubCollection); + + result.sequence = *pubCollection.maxSequence; } - // Update keyListings_ for added and removed keys - std::sort(publisherList.begin(), publisherList.end()); + return result; +} +void +ValidatorList::updatePublisherList( + PublicKey const& pubKey, + PublisherList const& current, + std::vector const& oldList, + ValidatorList::lock_guard const&) +{ + // Update keyListings_ for added and removed keys + std::vector const& publisherList = current.list; + std::vector const& manifests = current.manifests; auto iNew = publisherList.begin(); auto iOld = oldList.begin(); while (iNew != publisherList.end() || iOld != oldList.end()) @@ -430,9 +1056,150 @@ ValidatorList::applyList( << " contained invalid validator manifest"; } } +} - // Cache the validator list in a file - CacheValidatorFile(lock, pubKey, publisher); +ValidatorList::PublisherListStats +ValidatorList::applyList( + std::string const& globalManifest, + 
boost::optional const& localManifest, + std::string const& blob, + std::string const& signature, + std::uint32_t version, + std::string siteUri, + boost::optional const& hash, + ValidatorList::lock_guard const& lock) +{ + using namespace std::string_literals; + + Json::Value list; + PublicKey pubKey; + auto const& manifest = localManifest ? *localManifest : globalManifest; + auto const result = verify(lock, list, pubKey, manifest, blob, signature); + if (result > ListDisposition::pending) + { + if (publisherLists_.count(pubKey)) + { + auto const& pubCollection = publisherLists_[pubKey]; + if (pubCollection.maxSequence && + (result == ListDisposition::same_sequence || + result == ListDisposition::known_sequence)) + { + // We've seen something valid list for this publisher + // already, so return what we know about it. + return PublisherListStats{ + result, + pubKey, + pubCollection.status, + *pubCollection.maxSequence}; + } + } + return PublisherListStats{result}; + } + + // Update publisher's list + auto& pubCollection = publisherLists_[pubKey]; + auto const sequence = list[jss::sequence].asUInt(); + auto const accepted = + (result == ListDisposition::accepted || + result == ListDisposition::expired); + + if (accepted) + pubCollection.status = result == ListDisposition::accepted + ? PublisherStatus::available + : PublisherStatus::expired; + pubCollection.rawManifest = globalManifest; + if (!pubCollection.maxSequence || sequence > *pubCollection.maxSequence) + pubCollection.maxSequence = sequence; + + Json::Value const& newList = list[jss::validators]; + std::vector oldList; + if (accepted && pubCollection.remaining.count(sequence) != 0) + { + // We've seen this list before and stored it in "remaining". The + // normal expected process is that the processed list would have + // already been moved in to "current" by "updateTrusted()", but race + // conditions are possible, or the node may have lost sync, so do + // some of that work here. 
+ auto& publisher = pubCollection.current; + // Copy the old validator list + oldList = std::move(pubCollection.current.list); + // Move the publisher info from "remaining" to "current" + publisher = std::move(pubCollection.remaining[sequence]); + // Remove the entry in "remaining" + pubCollection.remaining.erase(sequence); + // Done + assert(publisher.sequence == sequence); + } + else + { + auto& publisher = accepted ? pubCollection.current + : pubCollection.remaining[sequence]; + publisher.sequence = sequence; + publisher.validFrom = TimeKeeper::time_point{TimeKeeper::duration{ + list.isMember(jss::effective) ? list[jss::effective].asUInt() : 0}}; + publisher.validUntil = TimeKeeper::time_point{ + TimeKeeper::duration{list[jss::expiration].asUInt()}}; + publisher.siteUri = std::move(siteUri); + publisher.rawBlob = blob; + publisher.rawSignature = signature; + publisher.rawManifest = localManifest; + if (hash) + publisher.hash = *hash; + + std::vector& publisherList = publisher.list; + std::vector& manifests = publisher.manifests; + + // Copy the old validator list + oldList = std::move(publisherList); + // Build the new validator list from "newList" + publisherList.clear(); + publisherList.reserve(newList.size()); + for (auto const& val : newList) + { + if (val.isObject() && val.isMember(jss::validation_public_key) && + val[jss::validation_public_key].isString()) + { + boost::optional const ret = + strUnHex(val[jss::validation_public_key].asString()); + + if (!ret || !publicKeyType(makeSlice(*ret))) + { + JLOG(j_.error()) + << "Invalid node identity: " + << val[jss::validation_public_key].asString(); + } + else + { + publisherList.push_back( + PublicKey(Slice{ret->data(), ret->size()})); + } + + if (val.isMember(jss::manifest) && + val[jss::manifest].isString()) + manifests.push_back(val[jss::manifest].asString()); + } + } + + // Standardize the list order by sorting + std::sort(publisherList.begin(), publisherList.end()); + } + // If this publisher has ever sent 
a more updated version than the one + // in this file, keep it. This scenario is unlikely, but legal. + pubCollection.rawVersion = std::max(pubCollection.rawVersion, version); + if (!pubCollection.remaining.empty()) + { + // If there are any pending VLs, then this collection must be at least + // version 2. + pubCollection.rawVersion = std::max(pubCollection.rawVersion, 2u); + } + + PublisherListStats const applyResult{ + result, pubKey, pubCollection.status, *pubCollection.maxSequence}; + + if (accepted) + { + updatePublisherList(pubKey, pubCollection.current, oldList, lock); + } return applyResult; } @@ -444,18 +1211,18 @@ ValidatorList::loadLists() using namespace boost::filesystem; using namespace boost::system::errc; - std::unique_lock lock{mutex_}; + std::lock_guard lock{mutex_}; std::vector sites; sites.reserve(publisherLists_.size()); - for (auto const& [pubKey, publisher] : publisherLists_) + for (auto const& [pubKey, publisherCollection] : publisherLists_) { boost::system::error_code ec; - if (publisher.available) + if (publisherCollection.status == PublisherStatus::available) continue; - boost::filesystem::path const filename = GetCacheFileName(lock, pubKey); + boost::filesystem::path const filename = getCacheFileName(lock, pubKey); auto const fullPath{canonical(filename, ec)}; if (ec) @@ -492,7 +1259,7 @@ ValidatorList::loadLists() ListDisposition ValidatorList::verify( - ValidatorList::unique_lock const& lock, + ValidatorList::lock_guard const& lock, Json::Value& list, PublicKey& pubKey, std::string const& manifest, @@ -511,8 +1278,9 @@ ValidatorList::verify( if (revoked && result == ManifestDisposition::accepted) { - removePublisherList(lock, pubKey); - publisherLists_.erase(pubKey); + removePublisherList(lock, pubKey, PublisherStatus::revoked); + // If the manifest is revoked, no future list is valid either + publisherLists_[pubKey].remaining.clear(); } if (revoked || result == ManifestDisposition::invalid) @@ -531,18 +1299,45 @@ 
ValidatorList::verify( if (!r.parse(data, list)) return ListDisposition::invalid; - if (list.isMember("sequence") && list["sequence"].isInt() && - list.isMember("expiration") && list["expiration"].isInt() && - list.isMember("validators") && list["validators"].isArray()) + if (list.isMember(jss::sequence) && list[jss::sequence].isInt() && + list.isMember(jss::expiration) && list[jss::expiration].isInt() && + (!list.isMember(jss::effective) || list[jss::effective].isInt()) && + list.isMember(jss::validators) && list[jss::validators].isArray()) { - auto const sequence = list["sequence"].asUInt(); - auto const expiration = TimeKeeper::time_point{ - TimeKeeper::duration{list["expiration"].asUInt()}}; - if (sequence < publisherLists_[pubKey].sequence || - expiration <= timeKeeper_.now()) + auto const sequence = list[jss::sequence].asUInt(); + auto const validFrom = TimeKeeper::time_point{TimeKeeper::duration{ + list.isMember(jss::effective) ? list[jss::effective].asUInt() : 0}}; + auto const validUntil = TimeKeeper::time_point{ + TimeKeeper::duration{list[jss::expiration].asUInt()}}; + auto const now = timeKeeper_.now(); + auto const& listCollection = publisherLists_[pubKey]; + if (validUntil <= validFrom) + return ListDisposition::invalid; + else if (sequence < listCollection.current.sequence) return ListDisposition::stale; - else if (sequence == publisherLists_[pubKey].sequence) + else if (sequence == listCollection.current.sequence) return ListDisposition::same_sequence; + else if (validUntil <= now) + return ListDisposition::expired; + else if (validFrom > now) + // Not yet valid. Return pending if one of the following is true + // * There's no maxSequence, indicating this is the first blob seen + // for this publisher + // * The sequence is larger than the maxSequence, indicating this + // blob is new + // * There's no entry for this sequence AND this blob is valid + // before the last blob, indicating blobs may be processing out of + // order. 
This may result in some duplicated processing, but + // prevents the risk of missing valid data. Else return + // known_sequence + return !listCollection.maxSequence || + sequence > *listCollection.maxSequence || + (listCollection.remaining.count(sequence) == 0 && + validFrom < listCollection.remaining + .at(*listCollection.maxSequence) + .validFrom) + ? ListDisposition::pending + : ListDisposition::known_sequence; } else { @@ -611,7 +1406,8 @@ bool ValidatorList::trustedPublisher(PublicKey const& identity) const { std::shared_lock read_lock{mutex_}; - return identity.size() && publisherLists_.count(identity); + return identity.size() && publisherLists_.count(identity) && + publisherLists_.at(identity).status < PublisherStatus::revoked; } PublicKey @@ -623,9 +1419,13 @@ ValidatorList::localPublicKey() const bool ValidatorList::removePublisherList( - ValidatorList::unique_lock const&, - PublicKey const& publisherKey) + ValidatorList::lock_guard const&, + PublicKey const& publisherKey, + PublisherStatus reason) { + assert( + reason != PublisherStatus::available && + reason != PublisherStatus::unavailable); auto const iList = publisherLists_.find(publisherKey); if (iList == publisherLists_.end()) return false; @@ -633,7 +1433,7 @@ ValidatorList::removePublisherList( JLOG(j_.debug()) << "Removing validator list for publisher " << strHex(publisherKey); - for (auto const& val : iList->second.list) + for (auto const& val : iList->second.current.list) { auto const& iVal = keyListings_.find(val); if (iVal == keyListings_.end()) @@ -645,8 +1445,8 @@ ValidatorList::removePublisherList( --iVal->second; } - iList->second.list.clear(); - iList->second.available = false; + iList->second.current.list.clear(); + iList->second.status = reason; return true; } @@ -668,15 +1468,32 @@ boost::optional ValidatorList::expires(ValidatorList::shared_lock const&) const { boost::optional res{boost::none}; - for (auto const& p : publisherLists_) + for (auto const& [pubKey, collection] : 
publisherLists_) { + (void)pubKey; // Unfetched - if (p.second.expiration == TimeKeeper::time_point{}) + auto const& current = collection.current; + if (current.validUntil == TimeKeeper::time_point{}) return boost::none; + // Find the latest validUntil in a chain where the next validFrom + // overlaps with the previous validUntil. applyLists has already cleaned + // up the list so the validFrom dates are guaranteed increasing. + auto chainedExpiration = current.validUntil; + for (auto const& [sequence, check] : collection.remaining) + { + (void)sequence; + if (check.validFrom <= chainedExpiration) + chainedExpiration = check.validUntil; + else + break; + } + // Earliest - if (!res || p.second.expiration < *res) - res = p.second.expiration; + if (!res || chainedExpiration < *res) + { + res = chainedExpiration; + } } return res; } @@ -732,32 +1549,61 @@ ValidatorList::getJson() const (res[jss::local_static_keys] = Json::arrayValue); if (auto it = publisherLists_.find(local); it != publisherLists_.end()) { - for (auto const& key : it->second.list) + for (auto const& key : it->second.current.list) jLocalStaticKeys.append(toBase58(TokenType::NodePublic, key)); } // Publisher lists Json::Value& jPublisherLists = (res[jss::publisher_lists] = Json::arrayValue); - for (auto const& p : publisherLists_) + for (auto const& [publicKey, pubCollection] : publisherLists_) { - if (local == p.first) + if (local == publicKey) continue; Json::Value& curr = jPublisherLists.append(Json::objectValue); - curr[jss::pubkey_publisher] = strHex(p.first); - curr[jss::available] = p.second.available; - curr[jss::uri] = p.second.siteUri; - if (p.second.expiration != TimeKeeper::time_point{}) + curr[jss::pubkey_publisher] = strHex(publicKey); + curr[jss::available] = + pubCollection.status == PublisherStatus::available; + + auto appendList = [](PublisherList const& publisherList, + Json::Value& target) { + target[jss::uri] = publisherList.siteUri; + if (publisherList.validUntil != 
TimeKeeper::time_point{}) + { + target[jss::seq] = + static_cast(publisherList.sequence); + target[jss::expiration] = to_string(publisherList.validUntil); + } + if (publisherList.validFrom != TimeKeeper::time_point{}) + target[jss::effective] = to_string(publisherList.validFrom); + Json::Value& keys = (target[jss::list] = Json::arrayValue); + for (auto const& key : publisherList.list) + { + keys.append(toBase58(TokenType::NodePublic, key)); + } + }; { - curr[jss::seq] = static_cast(p.second.sequence); - curr[jss::expiration] = to_string(p.second.expiration); - curr[jss::version] = requiredListVersion; + auto const& current = pubCollection.current; + appendList(current, curr); + if (current.validUntil != TimeKeeper::time_point{}) + { + curr[jss::version] = pubCollection.rawVersion; + } } - Json::Value& keys = (curr[jss::list] = Json::arrayValue); - for (auto const& key : p.second.list) + + Json::Value remaining(Json::arrayValue); + for (auto const& [sequence, future] : pubCollection.remaining) { - keys.append(toBase58(TokenType::NodePublic, key)); + using namespace std::chrono_literals; + + (void)sequence; + Json::Value& r = remaining.append(Json::objectValue); + appendList(future, r); + // Race conditions can happen, so make this check "fuzzy" + assert(future.validFrom > timeKeeper_.now() + 600s); } + if (remaining.size()) + curr[jss::remaining] = std::move(remaining); } // Trusted validator keys @@ -804,34 +1650,36 @@ ValidatorList::for_each_listed( } void -ValidatorList::for_each_available(std::function func) const +ValidatorList::for_each_available( + std::function const& blobInfos, + PublicKey const& pubKey, + std::size_t maxSequence, + uint256 const& hash)> func) const { std::shared_lock read_lock{mutex_}; - for (auto const& [key, pl] : publisherLists_) + for (auto const& [key, plCollection] : publisherLists_) { - if (!pl.available) + if (plCollection.status != PublisherStatus::available || key.empty()) continue; + assert(plCollection.maxSequence); func( - 
pl.rawManifest, - pl.rawBlob, - pl.rawSignature, - pl.rawVersion, + plCollection.rawManifest, + plCollection.rawVersion, + buildBlobInfos(plCollection), key, - pl.sequence, - pl.hash); + plCollection.maxSequence.value_or(0), + plCollection.fullHash); } } boost::optional -ValidatorList::getAvailable(boost::beast::string_view const& pubKey) +ValidatorList::getAvailable( + boost::beast::string_view const& pubKey, + boost::optional forceVersion /* = {} */) { std::shared_lock read_lock{mutex_}; @@ -846,18 +1694,14 @@ ValidatorList::getAvailable(boost::beast::string_view const& pubKey) auto id = PublicKey(makeSlice(*keyBlob)); - auto iter = publisherLists_.find(id); + auto const iter = publisherLists_.find(id); - if (iter == publisherLists_.end() || !iter->second.available) + if (iter == publisherLists_.end() || + iter->second.status != PublisherStatus::available) return {}; - Json::Value value(Json::objectValue); - - value[jss::public_key] = std::string{pubKey}; - value[jss::manifest] = iter->second.rawManifest; - value[jss::blob] = iter->second.rawBlob; - value[jss::signature] = iter->second.rawSignature; - value[jss::version] = iter->second.rawVersion; + Json::Value value = + buildFileData(std::string{pubKey}, iter->second, forceVersion, j_); return value; } @@ -872,7 +1716,7 @@ ValidatorList::calculateQuorum( // publishers are available for (auto const& list : publisherLists_) { - if (!list.second.available) + if (list.second.status != PublisherStatus::available) return std::numeric_limits::max(); } @@ -881,24 +1725,24 @@ ValidatorList::calculateQuorum( // // Theorem 8 of the Analysis of the XRP Ledger Consensus Protocol // (https://arxiv.org/abs/1802.07242) says: - // XRP LCP guarantees fork safety if Oi,j > nj/2 + ni − qi + ti,j for - // every pair of nodes Pi, Pj. + // XRP LCP guarantees fork safety if Oi,j > nj/2 + ni − qi + ti,j + // for every pair of nodes Pi, Pj. 
// // ni: size of Pi's UNL // nj: size of Pj's UNL // Oi,j: number of validators in both UNLs // qi: validation quorum for Pi's UNL - // ti, tj: maximum number of allowed Byzantine faults in Pi and Pj's UNLs - // ti,j: min{ti, tj, Oi,j} + // ti, tj: maximum number of allowed Byzantine faults in Pi and Pj's + // UNLs ti,j: min{ti, tj, Oi,j} // // Assume ni < nj, meaning and ti,j = ti // // For qi = .8*ni, we make ti <= .2*ni - // (We could make ti lower and tolerate less UNL overlap. However in order - // to prioritize safety over liveness, we need ti >= ni - qi) + // (We could make ti lower and tolerate less UNL overlap. However in + // order to prioritize safety over liveness, we need ti >= ni - qi) // - // An 80% quorum allows two UNLs to safely have < .2*ni unique validators - // between them: + // An 80% quorum allows two UNLs to safely have < .2*ni unique + // validators between them: // // pi = ni - Oi,j // pj = nj - Oi,j @@ -907,14 +1751,15 @@ ValidatorList::calculateQuorum( // ni - pi > (ni - pi + pj)/2 + ni − .8*ni + .2*ni // pi + pj < .2*ni // - // Note that the negative UNL protocol introduced the AbsoluteMinimumQuorum - // which is 60% of the original UNL size. The effective quorum should - // not be lower than it. + // Note that the negative UNL protocol introduced the + // AbsoluteMinimumQuorum which is 60% of the original UNL size. The + // effective quorum should not be lower than it. auto quorum = static_cast(std::max( std::ceil(effectiveUnlSize * 0.8f), std::ceil(unlSize * 0.6f))); - // Use lower quorum specified via command line if the normal quorum appears - // unreachable based on the number of recently received validations. + // Use lower quorum specified via command line if the normal quorum + // appears unreachable based on the number of recently received + // validations. 
if (minimumQuorum_ && *minimumQuorum_ < quorum && seenSize < quorum) { quorum = *minimumQuorum_; @@ -927,17 +1772,87 @@ ValidatorList::calculateQuorum( } TrustChanges -ValidatorList::updateTrusted(hash_set const& seenValidators) +ValidatorList::updateTrusted( + hash_set const& seenValidators, + NetClock::time_point closeTime, + NetworkOPs& ops, + Overlay& overlay, + HashRouter& hashRouter) { - std::unique_lock lock{mutex_}; + using namespace std::chrono_literals; + if (timeKeeper_.now() > closeTime + 30s) + closeTime = timeKeeper_.now(); - // Remove any expired published lists - for (auto const& list : publisherLists_) + std::lock_guard lock{mutex_}; + + // Rotate pending and remove expired published lists + bool good = true; + for (auto& [pubKey, collection] : publisherLists_) { - if (list.second.available && - list.second.expiration <= timeKeeper_.now()) - removePublisherList(lock, list.first); + { + auto& remaining = collection.remaining; + auto const firstIter = remaining.begin(); + auto iter = firstIter; + if (iter != remaining.end() && iter->second.validFrom <= closeTime) + { + // Find the LAST candidate that is ready to go live. + for (auto next = std::next(iter); next != remaining.end() && + next->second.validFrom <= closeTime; + ++iter, ++next) + { + assert(std::next(iter) == next); + } + assert(iter != remaining.end()); + + // Rotate the pending list in to current + auto sequence = iter->first; + auto& candidate = iter->second; + auto& current = collection.current; + assert(candidate.validFrom <= closeTime); + + auto const oldList = current.list; + current = std::move(candidate); + if (collection.status != PublisherStatus::available) + collection.status = PublisherStatus::available; + assert(current.sequence == sequence); + // If the list is expired, remove the validators so they don't + // get processed in. 
The expiration check below will do the rest + // of the work + if (current.validUntil <= closeTime) + current.list.clear(); + + updatePublisherList(pubKey, current, oldList, lock); + + // Only broadcast the current, which will consequently only + // send to peers that don't understand v2, or which are + // unknown (unlikely). Those that do understand v2 should + // already have this list and are in the process of + // switching themselves. + broadcastBlobs( + pubKey, + collection, + sequence, + current.hash, + overlay, + hashRouter, + j_); + + // Erase any candidates that we skipped over, plus this one + remaining.erase(firstIter, std::next(iter)); + } + } + // Remove if expired + if (collection.status == PublisherStatus::available && + collection.current.validUntil <= closeTime) + { + removePublisherList(lock, pubKey, PublisherStatus::expired); + ops.setUNLBlocked(); + } + if (collection.status != PublisherStatus::available) + good = false; } + if (good) + ops.clearUNLBlocked(); TrustChanges trustChanges; @@ -962,7 +1877,8 @@ ValidatorList::updateTrusted(hash_set const& seenValidators) trustChanges.added.insert(calcNodeID(val.first)); } - // If there were any changes, we need to update the ephemeral signing keys: + // If there were any changes, we need to update the ephemeral signing + // keys: if (!trustChanges.added.empty() || !trustChanges.removed.empty()) { trustedSigningKeys_.clear(); @@ -975,7 +1891,7 @@ ValidatorList::updateTrusted(hash_set const& seenValidators) << trustedMasterKeys_.size() << " of " << keyListings_.size() << " listed validators eligible for inclusion in the trusted set"; - auto unlSize = trustedMasterKeys_.size(); + auto const unlSize = trustedMasterKeys_.size(); auto effectiveUnlSize = unlSize; auto seenSize = seenValidators.size(); if (!negativeUNL_.empty()) @@ -999,15 +1915,21 @@ ValidatorList::updateTrusted(hash_set const& seenValidators) quorum_ = calculateQuorum(unlSize, effectiveUnlSize, seenSize); JLOG(j_.debug()) << "Using quorum 
of " << quorum_ << " for new set of " - << trustedMasterKeys_.size() << " trusted validators (" + << unlSize << " trusted validators (" << trustChanges.added.size() << " added, " << trustChanges.removed.size() << " removed)"; - if (trustedMasterKeys_.size() < quorum_) + if (unlSize < quorum_) { JLOG(j_.warn()) << "New quorum of " << quorum_ << " exceeds the number of trusted validators (" - << trustedMasterKeys_.size() << ")"; + << unlSize << ")"; + } + + if (publisherLists_.size() && unlSize == 0) + { + // No validators. Lock down. + ops.setUNLBlocked(); } return trustChanges; @@ -1030,7 +1952,7 @@ ValidatorList::getNegativeUNL() const void ValidatorList::setNegativeUNL(hash_set const& negUnl) { - std::unique_lock lock{mutex_}; + std::lock_guard lock{mutex_}; negativeUNL_ = negUnl; } diff --git a/src/ripple/app/misc/impl/ValidatorSite.cpp b/src/ripple/app/misc/impl/ValidatorSite.cpp index 17d05e2b0..e26ce7a87 100644 --- a/src/ripple/app/misc/impl/ValidatorSite.cpp +++ b/src/ripple/app/misc/impl/ValidatorSite.cpp @@ -355,78 +355,116 @@ ValidatorSite::parseJsonResponse( std::size_t siteIdx, std::lock_guard& sites_lock) { - Json::Reader r; - Json::Value body; - if (!r.parse(res.data(), body)) - { - JLOG(j_.warn()) << "Unable to parse JSON response from " - << sites_[siteIdx].activeResource->uri; - throw std::runtime_error{"bad json"}; - } + Json::Value const body = [&res, siteIdx, this]() { + Json::Reader r; + Json::Value body; + if (!r.parse(res.data(), body)) + { + JLOG(j_.warn()) << "Unable to parse JSON response from " + << sites_[siteIdx].activeResource->uri; + throw std::runtime_error{"bad json"}; + } + return body; + }(); - if (!body.isObject() || !body.isMember("blob") || - !body["blob"].isString() || !body.isMember("manifest") || - !body["manifest"].isString() || !body.isMember("signature") || - !body["signature"].isString() || !body.isMember("version") || - !body["version"].isInt()) + auto const [valid, version, blobs] = [&body]() { + // Check the easy 
fields first + bool valid = body.isObject() && body.isMember(jss::manifest) && + body[jss::manifest].isString() && body.isMember(jss::version) && + body[jss::version].isInt(); + // Check the version-specific blob & signature fields + std::uint32_t version; + std::vector blobs; + if (valid) + { + version = body[jss::version].asUInt(); + blobs = ValidatorList::parseBlobs(version, body); + valid = !blobs.empty(); + } + return std::make_tuple(valid, version, blobs); + }(); + + if (!valid) { JLOG(j_.warn()) << "Missing fields in JSON response from " << sites_[siteIdx].activeResource->uri; throw std::runtime_error{"missing fields"}; } - auto const manifest = body["manifest"].asString(); - auto const blob = body["blob"].asString(); - auto const signature = body["signature"].asString(); - auto const version = body["version"].asUInt(); + auto const manifest = body[jss::manifest].asString(); + assert(version == body[jss::version].asUInt()); auto const& uri = sites_[siteIdx].activeResource->uri; - auto const hash = sha512Half(manifest, blob, signature, version); - auto const applyResult = app_.validators().applyListAndBroadcast( + auto const hash = sha512Half(manifest, blobs, version); + auto const applyResult = app_.validators().applyListsAndBroadcast( manifest, - blob, - signature, version, + blobs, uri, hash, app_.overlay(), - app_.getHashRouter()); - auto const disp = applyResult.disposition; + app_.getHashRouter(), + app_.getOPs()); sites_[siteIdx].lastRefreshStatus.emplace( - Site::Status{clock_type::now(), disp, ""}); + Site::Status{clock_type::now(), applyResult.bestDisposition(), ""}); - switch (disp) + for (auto const [disp, count] : applyResult.dispositions) { - case ListDisposition::accepted: - JLOG(j_.debug()) << "Applied new validator list from " << uri; - break; - case ListDisposition::same_sequence: - JLOG(j_.debug()) - << "Validator list with current sequence from " << uri; - break; - case ListDisposition::stale: - JLOG(j_.warn()) << "Stale validator list 
from " << uri; - break; - case ListDisposition::untrusted: - JLOG(j_.warn()) << "Untrusted validator list from " << uri; - break; - case ListDisposition::invalid: - JLOG(j_.warn()) << "Invalid validator list from " << uri; - break; - case ListDisposition::unsupported_version: - JLOG(j_.warn()) - << "Unsupported version validator list from " << uri; - break; - default: - BOOST_ASSERT(false); + switch (disp) + { + case ListDisposition::accepted: + JLOG(j_.debug()) << "Applied " << count + << " new validator list(s) from " << uri; + break; + case ListDisposition::expired: + JLOG(j_.debug()) << "Applied " << count + << " expired validator list(s) from " << uri; + break; + case ListDisposition::same_sequence: + JLOG(j_.debug()) + << "Ignored " << count + << " validator list(s) with current sequence from " << uri; + break; + case ListDisposition::pending: + JLOG(j_.debug()) << "Processed " << count + << " future validator list(s) from " << uri; + break; + case ListDisposition::known_sequence: + JLOG(j_.debug()) + << "Ignored " << count + << " validator list(s) with future known sequence from " + << uri; + break; + case ListDisposition::stale: + JLOG(j_.warn()) << "Ignored " << count + << "stale validator list(s) from " << uri; + break; + case ListDisposition::untrusted: + JLOG(j_.warn()) << "Ignored " << count + << " untrusted validator list(s) from " << uri; + break; + case ListDisposition::invalid: + JLOG(j_.warn()) << "Ignored " << count + << " invalid validator list(s) from " << uri; + break; + case ListDisposition::unsupported_version: + JLOG(j_.warn()) + << "Ignored " << count + << " unsupported version validator list(s) from " << uri; + break; + default: + BOOST_ASSERT(false); + } } - if (body.isMember("refresh_interval") && - body["refresh_interval"].isNumeric()) + if (body.isMember(jss::refresh_interval) && + body[jss::refresh_interval].isNumeric()) { using namespace std::chrono_literals; std::chrono::minutes const refresh = boost::algorithm::clamp( - 
std::chrono::minutes{body["refresh_interval"].asUInt()}, 1min, 24h); + std::chrono::minutes{body[jss::refresh_interval].asUInt()}, + 1min, + 24h); sites_[siteIdx].refreshInterval = refresh; sites_[siteIdx].nextRefresh = clock_type::now() + sites_[siteIdx].refreshInterval; diff --git a/src/ripple/overlay/Message.h b/src/ripple/overlay/Message.h index 5ce665858..533001dca 100644 --- a/src/ripple/overlay/Message.h +++ b/src/ripple/overlay/Message.h @@ -20,6 +20,7 @@ #ifndef RIPPLE_OVERLAY_MESSAGE_H_INCLUDED #define RIPPLE_OVERLAY_MESSAGE_H_INCLUDED +#include #include #include #include @@ -34,6 +35,8 @@ namespace ripple { +constexpr std::size_t maximiumMessageSize = megabytes(64); + // VFALCO NOTE If we forward declare Message and write out shared_ptr // instead of using the in-class type alias, we can remove the // entire ripple.pb.h from the main headers. @@ -68,6 +71,12 @@ public: std::size_t getBufferSize(); + static std::size_t + messageSize(::google::protobuf::Message const& message); + + static std::size_t + totalSize(::google::protobuf::Message const& message); + /** Retrieve the packed message data. If compressed message is requested but * the message is not compressible then the uncompressed buffer is returned. * @param compressed Request compressed (Compress::On) or diff --git a/src/ripple/overlay/Peer.h b/src/ripple/overlay/Peer.h index 1f5aad376..105670464 100644 --- a/src/ripple/overlay/Peer.h +++ b/src/ripple/overlay/Peer.h @@ -37,6 +37,7 @@ static constexpr std::uint32_t csHopLimit = 3; enum class ProtocolFeature { ValidatorListPropagation, + ValidatorList2Propagation }; /** Represents a peer connection in the overlay. 
*/ diff --git a/src/ripple/overlay/impl/Message.cpp b/src/ripple/overlay/impl/Message.cpp index d5aec1a2d..fb6b3a6dd 100644 --- a/src/ripple/overlay/impl/Message.cpp +++ b/src/ripple/overlay/impl/Message.cpp @@ -32,11 +32,7 @@ Message::Message( { using namespace ripple::compression; -#if defined(GOOGLE_PROTOBUF_VERSION) && (GOOGLE_PROTOBUF_VERSION >= 3011000) - auto const messageBytes = message.ByteSizeLong(); -#else - unsigned const messageBytes = message.ByteSize(); -#endif + auto const messageBytes = messageSize(message); assert(messageBytes != 0); @@ -46,6 +42,26 @@ Message::Message( if (messageBytes != 0) message.SerializeToArray(buffer_.data() + headerBytes, messageBytes); + + assert(getBufferSize() == totalSize(message)); +} + +// static +std::size_t +Message::messageSize(::google::protobuf::Message const& message) +{ +#if defined(GOOGLE_PROTOBUF_VERSION) && (GOOGLE_PROTOBUF_VERSION >= 3011000) + return message.ByteSizeLong(); +#else + return message.ByteSize(); +#endif +} + +// static +std::size_t +Message::totalSize(::google::protobuf::Message const& message) +{ + return messageSize(message) + compression::headerBytes; } void @@ -68,6 +84,7 @@ Message::compress() case protocol::mtLEDGER_DATA: case protocol::mtGET_OBJECTS: case protocol::mtVALIDATORLIST: + case protocol::mtVALIDATORLISTCOLLECTION: return true; case protocol::mtPING: case protocol::mtCLUSTER: diff --git a/src/ripple/overlay/impl/OverlayImpl.cpp b/src/ripple/overlay/impl/OverlayImpl.cpp index 489a69ed7..44973464a 100644 --- a/src/ripple/overlay/impl/OverlayImpl.cpp +++ b/src/ripple/overlay/impl/OverlayImpl.cpp @@ -1026,13 +1026,7 @@ OverlayImpl::processValidatorList( if (!req.target().starts_with(prefix.data()) || !setup_.vlEnabled) return false; - auto key = req.target().substr(prefix.size()); - - if (key.empty()) - return false; - - // find the list - auto vl = app_.validators().getAvailable(key); + std::uint32_t version = 1; boost::beast::http::response msg; msg.version(req.version()); @@ 
-1040,24 +1034,52 @@ OverlayImpl::processValidatorList( msg.insert("Content-Type", "application/json"); msg.insert("Connection", "close"); - if (!vl) - { - // 404 not found - msg.result(boost::beast::http::status::not_found); + auto fail = [&msg, &handoff](auto status) { + msg.result(status); msg.insert("Content-Length", "0"); msg.body() = Json::nullValue; + + msg.prepare_payload(); + handoff.response = std::make_shared(msg); + return true; + }; + + auto key = req.target().substr(prefix.size()); + + if (auto slash = key.find('/'); slash != boost::string_view::npos) + { + auto verString = key.substr(0, slash); + if (!boost::conversion::try_lexical_convert(verString, version)) + return fail(boost::beast::http::status::bad_request); + key = key.substr(slash + 1); + } + + if (key.empty()) + return fail(boost::beast::http::status::bad_request); + + // find the list + auto vl = app_.validators().getAvailable(key, version); + + if (!vl) + { + // 404 not found + return fail(boost::beast::http::status::not_found); + } + else if (!*vl) + { + return fail(boost::beast::http::status::bad_request); } else { msg.result(boost::beast::http::status::ok); msg.body() = *vl; - } - msg.prepare_payload(); - handoff.response = std::make_shared(msg); - return true; + msg.prepare_payload(); + handoff.response = std::make_shared(msg); + return true; + } } bool diff --git a/src/ripple/overlay/impl/PeerImp.cpp b/src/ripple/overlay/impl/PeerImp.cpp index edf024ca9..364884d9c 100644 --- a/src/ripple/overlay/impl/PeerImp.cpp +++ b/src/ripple/overlay/impl/PeerImp.cpp @@ -433,6 +433,8 @@ PeerImp::supportsFeature(ProtocolFeature f) const { case ProtocolFeature::ValidatorListPropagation: return protocol_ >= make_protocol(2, 1); + case ProtocolFeature::ValidatorList2Propagation: + return protocol_ >= make_protocol(2, 2); } return false; } @@ -817,29 +819,27 @@ PeerImp::doProtocolStart() // Send all the validator lists that have been loaded if 
(supportsFeature(ProtocolFeature::ValidatorListPropagation)) { - app_.validators().for_each_available([&](std::string const& manifest, - std::string const& blob, - std::string const& signature, - std::uint32_t version, - PublicKey const& pubKey, - std::size_t sequence, - uint256 const& hash) { - protocol::TMValidatorList vl; + app_.validators().for_each_available( + [&](std::string const& manifest, + std::uint32_t version, + std::map const& blobInfos, + PublicKey const& pubKey, + std::size_t maxSequence, + uint256 const& hash) { + ValidatorList::sendValidatorList( + *this, + 0, + pubKey, + maxSequence, + version, + manifest, + blobInfos, + app_.getHashRouter(), + p_journal_); - vl.set_manifest(manifest); - vl.set_blob(blob); - vl.set_signature(signature); - vl.set_version(version); - - JLOG(p_journal_.debug()) - << "Sending validator list for " << strHex(pubKey) - << " with sequence " << sequence << " to " - << remote_address_.to_string() << " (" << id_ << ")"; - send(std::make_shared(vl, protocol::mtVALIDATORLIST)); - // Don't send it next time. - app_.getHashRouter().addSuppressionPeer(hash, id_); - setPublisherListSequence(pubKey, sequence); - }); + // Don't send it next time. + app_.getHashRouter().addSuppressionPeer(hash, id_); + }); } if (auto m = overlay_.getManifestsMessage()) @@ -1859,6 +1859,202 @@ PeerImp::onMessage(std::shared_ptr const& m) } } +void +PeerImp::onValidatorListMessage( + std::string const& messageType, + std::string const& manifest, + std::uint32_t version, + std::vector const& blobs) +{ + // If there are no blobs, the message is malformed (possibly because of + // ValidatorList class rules), so charge accordingly and skip processing. 
+ if (blobs.empty()) + { + JLOG(p_journal_.warn()) << "Ignored malformed " << messageType + << " from peer " << remote_address_; + // This shouldn't ever happen with a well-behaved peer + fee_ = Resource::feeHighBurdenPeer; + return; + } + + auto const hash = sha512Half(manifest, blobs, version); + + JLOG(p_journal_.debug()) + << "Received " << messageType << " from " << remote_address_.to_string() + << " (" << id_ << ")"; + + if (!app_.getHashRouter().addSuppressionPeer(hash, id_)) + { + JLOG(p_journal_.debug()) + << messageType << ": received duplicate " << messageType; + // Charging this fee here won't hurt the peer in the normal + // course of operation (ie. refresh every 5 minutes), but + // will add up if the peer is misbehaving. + fee_ = Resource::feeUnwantedData; + return; + } + + auto const applyResult = app_.validators().applyListsAndBroadcast( + manifest, + version, + blobs, + remote_address_.to_string(), + hash, + app_.overlay(), + app_.getHashRouter(), + app_.getOPs()); + + JLOG(p_journal_.debug()) + << "Processed " << messageType << " version " << version << " from " + << (applyResult.publisherKey ? 
strHex(*applyResult.publisherKey) + : "unknown or invalid publisher") + << " from " << remote_address_.to_string() << " (" << id_ + << ") with best result " << to_string(applyResult.bestDisposition()); + + // Act based on the best result + switch (applyResult.bestDisposition()) + { + // New list + case ListDisposition::accepted: + // Newest list is expired, and that needs to be broadcast, too + case ListDisposition::expired: + // Future list + case ListDisposition::pending: { + std::lock_guard sl(recentLock_); + + assert(applyResult.publisherKey); + auto const& pubKey = *applyResult.publisherKey; +#ifndef NDEBUG + if (auto const iter = publisherListSequences_.find(pubKey); + iter != publisherListSequences_.end()) + { + assert(iter->second < applyResult.sequence); + } +#endif + publisherListSequences_[pubKey] = applyResult.sequence; + } + break; + case ListDisposition::same_sequence: + case ListDisposition::known_sequence: +#ifndef NDEBUG + { + std::lock_guard sl(recentLock_); + assert(applyResult.sequence && applyResult.publisherKey); + assert( + publisherListSequences_[*applyResult.publisherKey] <= + applyResult.sequence); + } +#endif // !NDEBUG + + break; + case ListDisposition::stale: + case ListDisposition::untrusted: + case ListDisposition::invalid: + case ListDisposition::unsupported_version: + break; + default: + assert(false); + } + + // Charge based on the worst result + switch (applyResult.worstDisposition()) + { + case ListDisposition::accepted: + case ListDisposition::expired: + case ListDisposition::pending: + // No charges for good data + break; + case ListDisposition::same_sequence: + case ListDisposition::known_sequence: + // Charging this fee here won't hurt the peer in the normal + // course of operation (ie. refresh every 5 minutes), but + // will add up if the peer is misbehaving. 
+ fee_ = Resource::feeUnwantedData; + break; + case ListDisposition::stale: + // There are very few good reasons for a peer to send an + // old list, particularly more than once. + fee_ = Resource::feeBadData; + break; + case ListDisposition::untrusted: + // Charging this fee here won't hurt the peer in the normal + // course of operation (ie. refresh every 5 minutes), but + // will add up if the peer is misbehaving. + fee_ = Resource::feeUnwantedData; + break; + case ListDisposition::invalid: + // This shouldn't ever happen with a well-behaved peer + fee_ = Resource::feeInvalidSignature; + break; + case ListDisposition::unsupported_version: + // During a version transition, this may be legitimate. + // If it happens frequently, that's probably bad. + fee_ = Resource::feeBadData; + break; + default: + assert(false); + } + + // Log based on all the results. + for (auto const [disp, count] : applyResult.dispositions) + { + switch (disp) + { + // New list + case ListDisposition::accepted: + JLOG(p_journal_.debug()) + << "Applied " << count << " new " << messageType + << "(s) from peer " << remote_address_; + break; + // Newest list is expired, and that needs to be broadcast, too + case ListDisposition::expired: + JLOG(p_journal_.debug()) + << "Applied " << count << " expired " << messageType + << "(s) from peer " << remote_address_; + break; + // Future list + case ListDisposition::pending: + JLOG(p_journal_.debug()) + << "Processed " << count << " future " << messageType + << "(s) from peer " << remote_address_; + break; + case ListDisposition::same_sequence: + JLOG(p_journal_.warn()) + << "Ignored " << count << " " << messageType + << "(s) with current sequence from peer " + << remote_address_; + break; + case ListDisposition::known_sequence: + JLOG(p_journal_.warn()) + << "Ignored " << count << " " << messageType + << "(s) with future sequence from peer " << remote_address_; + break; + case ListDisposition::stale: + JLOG(p_journal_.warn()) + << "Ignored " << count 
<< " stale " << messageType + "(s) from peer " << remote_address_; + break; + case ListDisposition::untrusted: + JLOG(p_journal_.warn()) + << "Ignored " << count << " untrusted " << messageType + "(s) from peer " << remote_address_; + break; + case ListDisposition::unsupported_version: + JLOG(p_journal_.warn()) + << "Ignored " << count << " unsupported version " + << messageType << "(s) from peer " << remote_address_; + break; + case ListDisposition::invalid: + JLOG(p_journal_.warn()) + << "Ignored " << count << " invalid " << messageType + "(s) from peer " << remote_address_; + break; + default: + assert(false); + } + } +} + void PeerImp::onMessage(std::shared_ptr const& m) { @@ -1873,117 +2069,11 @@ PeerImp::onMessage(std::shared_ptr const& m) fee_ = Resource::feeUnwantedData; return; } - auto const& manifest = m->manifest(); - auto const& blob = m->blob(); - auto const& signature = m->signature(); - auto const version = m->version(); - auto const hash = sha512Half(manifest, blob, signature, version); - - JLOG(p_journal_.debug()) - << "Received validator list from " << remote_address_.to_string() - << " (" << id_ << ")"; - - if (!app_.getHashRouter().addSuppressionPeer(hash, id_)) - { - JLOG(p_journal_.debug()) - << "ValidatorList: received duplicate validator list"; - // Charging this fee here won't hurt the peer in the normal - // course of operation (ie. refresh every 5 minutes), but - // will add up if the peer is misbehaving. - fee_ = Resource::feeUnwantedData; - return; - } - - auto const applyResult = app_.validators().applyListAndBroadcast( - manifest, - blob, - signature, - version, - remote_address_.to_string(), - hash, - app_.overlay(), - app_.getHashRouter()); - auto const disp = applyResult.disposition; - - JLOG(p_journal_.debug()) - << "Processed validator list from " - << (applyResult.publisherKey ? 
strHex(*applyResult.publisherKey) - : "unknown or invalid publisher") - << " from " << remote_address_.to_string() << " (" << id_ - << ") with result " << to_string(disp); - - switch (disp) - { - case ListDisposition::accepted: - JLOG(p_journal_.debug()) - << "Applied new validator list from peer " - << remote_address_; - { - std::lock_guard sl(recentLock_); - - assert(applyResult.sequence && applyResult.publisherKey); - auto const& pubKey = *applyResult.publisherKey; -#ifndef NDEBUG - if (auto const iter = publisherListSequences_.find(pubKey); - iter != publisherListSequences_.end()) - { - assert(iter->second < *applyResult.sequence); - } -#endif - publisherListSequences_[pubKey] = *applyResult.sequence; - } - break; - case ListDisposition::same_sequence: - JLOG(p_journal_.warn()) - << "Validator list with current sequence from peer " - << remote_address_; - // Charging this fee here won't hurt the peer in the normal - // course of operation (ie. refresh every 5 minutes), but - // will add up if the peer is misbehaving. - fee_ = Resource::feeUnwantedData; -#ifndef NDEBUG - { - std::lock_guard sl(recentLock_); - assert(applyResult.sequence && applyResult.publisherKey); - assert( - publisherListSequences_[*applyResult.publisherKey] == - *applyResult.sequence); - } -#endif // !NDEBUG - - break; - case ListDisposition::stale: - JLOG(p_journal_.warn()) - << "Stale validator list from peer " << remote_address_; - // There are very few good reasons for a peer to send an - // old list, particularly more than once. - fee_ = Resource::feeBadData; - break; - case ListDisposition::untrusted: - JLOG(p_journal_.warn()) - << "Untrusted validator list from peer " << remote_address_; - // Charging this fee here won't hurt the peer in the normal - // course of operation (ie. refresh every 5 minutes), but - // will add up if the peer is misbehaving. 
- fee_ = Resource::feeUnwantedData; - break; - case ListDisposition::invalid: - JLOG(p_journal_.warn()) - << "Invalid validator list from peer " << remote_address_; - // This shouldn't ever happen with a well-behaved peer - fee_ = Resource::feeInvalidSignature; - break; - case ListDisposition::unsupported_version: - JLOG(p_journal_.warn()) - << "Unsupported version validator list from peer " - << remote_address_; - // During a version transition, this may be legitimate. - // If it happens frequently, that's probably bad. - fee_ = Resource::feeBadData; - break; - default: - assert(false); - } + onValidatorListMessage( + "ValidatorList", + m->manifest(), + m->version(), + ValidatorList::parseBlobs(*m)); } catch (std::exception const& e) { @@ -1993,6 +2083,45 @@ PeerImp::onMessage(std::shared_ptr const& m) } } +void +PeerImp::onMessage( + std::shared_ptr const& m) +{ + try + { + if (!supportsFeature(ProtocolFeature::ValidatorList2Propagation)) + { + JLOG(p_journal_.debug()) + << "ValidatorListCollection: received validator list from peer " + << "using protocol version " << to_string(protocol_) + << " which shouldn't support this feature."; + fee_ = Resource::feeUnwantedData; + return; + } + else if (m->version() < 2) + { + JLOG(p_journal_.debug()) + << "ValidatorListCollection: received invalid validator list " + "version " + << m->version() << " from peer using protocol version " + << to_string(protocol_); + fee_ = Resource::feeBadData; + return; + } + onValidatorListMessage( + "ValidatorListCollection", + m->manifest(), + m->version(), + ValidatorList::parseBlobs(*m)); + } + catch (std::exception const& e) + { + JLOG(p_journal_.warn()) << "ValidatorListCollection: Exception, " + << e.what() << " from peer " << remote_address_; + fee_ = Resource::feeBadData; + } +} + void PeerImp::onMessage(std::shared_ptr const& m) { diff --git a/src/ripple/overlay/impl/PeerImp.h b/src/ripple/overlay/impl/PeerImp.h index 77baca9e2..35fafb581 100644 --- 
a/src/ripple/overlay/impl/PeerImp.h +++ b/src/ripple/overlay/impl/PeerImp.h @@ -43,6 +43,8 @@ namespace ripple { +struct ValidatorBlobInfo; + class PeerImp : public Peer, public std::enable_shared_from_this, public OverlayImpl::Child @@ -509,6 +511,8 @@ public: void onMessage(std::shared_ptr const& m); void + onMessage(std::shared_ptr const& m); + void onMessage(std::shared_ptr const& m); void onMessage(std::shared_ptr const& m); @@ -527,6 +531,13 @@ private: void doFetchPack(const std::shared_ptr& packet); + void + onValidatorListMessage( + std::string const& messageType, + std::string const& manifest, + std::uint32_t version, + std::vector const& blobs); + void checkTransaction( int flags, diff --git a/src/ripple/overlay/impl/ProtocolMessage.h b/src/ripple/overlay/impl/ProtocolMessage.h index 0d28ebd7c..745016d74 100644 --- a/src/ripple/overlay/impl/ProtocolMessage.h +++ b/src/ripple/overlay/impl/ProtocolMessage.h @@ -73,6 +73,8 @@ protocolMessageName(int type) return "have_set"; case protocol::mtVALIDATORLIST: return "validator_list"; + case protocol::mtVALIDATORLISTCOLLECTION: + return "validator_list_collection"; case protocol::mtVALIDATION: return "validation"; case protocol::mtGET_OBJECTS: @@ -222,11 +224,10 @@ parseMessageHeader( template < class T, class Buffers, - class Handler, class = std::enable_if_t< std::is_base_of<::google::protobuf::Message, T>::value>> -bool -invoke(MessageHeader const& header, Buffers const& buffers, Handler& handler) +std::shared_ptr +parseMessageContent(MessageHeader const& header, Buffers const& buffers) { auto const m = std::make_shared(); @@ -246,9 +247,25 @@ invoke(MessageHeader const& header, Buffers const& buffers, Handler& handler) header.algorithm); if (payloadSize == 0 || !m->ParseFromArray(payload.data(), payloadSize)) - return false; + return {}; } else if (!m->ParseFromZeroCopyStream(&stream)) + return {}; + + return m; +} + +template < + class T, + class Buffers, + class Handler, + class = std::enable_if_t< + 
std::is_base_of<::google::protobuf::Message, T>::value>> +bool +invoke(MessageHeader const& header, Buffers const& buffers, Handler& handler) +{ + auto const m = parseMessageContent(header, buffers); + if (!m) return false; handler.onMessageBegin(header.message_type, m, header.payload_wire_size); @@ -300,8 +317,8 @@ invokeProtocolMessage( // whose size exceeds this may result in the connection being dropped. A // larger message size may be supported in the future or negotiated as // part of a protocol upgrade. - if (header->payload_wire_size > megabytes(64) || - header->uncompressed_size > megabytes(64)) + if (header->payload_wire_size > maximiumMessageSize || + header->uncompressed_size > maximiumMessageSize) { result.second = make_error_code(boost::system::errc::message_size); return result; @@ -391,6 +408,10 @@ invokeProtocolMessage( success = detail::invoke( *header, buffers, handler); break; + case protocol::mtVALIDATORLISTCOLLECTION: + success = detail::invoke( + *header, buffers, handler); + break; case protocol::mtGET_OBJECTS: success = detail::invoke( *header, buffers, handler); diff --git a/src/ripple/overlay/impl/ProtocolVersion.cpp b/src/ripple/overlay/impl/ProtocolVersion.cpp index cb35cd5b2..fade6c355 100644 --- a/src/ripple/overlay/impl/ProtocolVersion.cpp +++ b/src/ripple/overlay/impl/ProtocolVersion.cpp @@ -38,7 +38,8 @@ constexpr ProtocolVersion const supportedProtocolList[] { {1, 2}, {2, 0}, - {2, 1} + {2, 1}, + {2, 2} }; // clang-format on diff --git a/src/ripple/overlay/impl/TrafficCount.cpp b/src/ripple/overlay/impl/TrafficCount.cpp index 298052771..779b96dac 100644 --- a/src/ripple/overlay/impl/TrafficCount.cpp +++ b/src/ripple/overlay/impl/TrafficCount.cpp @@ -48,7 +48,8 @@ TrafficCount::categorize( if (type == protocol::mtTRANSACTION) return TrafficCount::category::transaction; - if (type == protocol::mtVALIDATORLIST) + if (type == protocol::mtVALIDATORLIST || + type == protocol::mtVALIDATORLISTCOLLECTION) return 
TrafficCount::category::validatorlist; if (type == protocol::mtVALIDATION) diff --git a/src/ripple/proto/ripple.proto b/src/ripple/proto/ripple.proto index 5de1cdf39..12fa466c6 100644 --- a/src/ripple/proto/ripple.proto +++ b/src/ripple/proto/ripple.proto @@ -24,6 +24,7 @@ enum MessageType mtPEER_SHARD_INFO = 53; mtVALIDATORLIST = 54; mtSQUELCH = 55; + mtVALIDATORLISTCOLLECTION = 56; } // token, iterations, target, challenge = issue demand for proof of work @@ -203,7 +204,7 @@ message TMHaveTransactionSet required bytes hash = 2; } -// Validator list +// Validator list (UNL) message TMValidatorList { required bytes manifest = 1; @@ -212,6 +213,22 @@ message TMValidatorList required uint32 version = 4; } +// Validator List v2 +message ValidatorBlobInfo +{ + optional bytes manifest = 1; + required bytes blob = 2; + required bytes signature = 3; +} + +// Collection of Validator List v2 (UNL) +message TMValidatorListCollection +{ + required uint32 version = 1; + required bytes manifest = 2; + repeated ValidatorBlobInfo blobs = 3; +} + // Used to sign a final closed ledger after reprocessing message TMValidation { diff --git a/src/ripple/protocol/ErrorCodes.h b/src/ripple/protocol/ErrorCodes.h index 1eb91e2b5..669256448 100644 --- a/src/ripple/protocol/ErrorCodes.h +++ b/src/ripple/protocol/ErrorCodes.h @@ -134,8 +134,9 @@ enum error_code_i { rpcDB_DESERIALIZATION = 77, rpcEXCESSIVE_LGR_RANGE = 78, rpcINVALID_LGR_RANGE = 79, + rpcEXPIRED_VALIDATOR_LIST = 80, rpcLAST = - rpcINVALID_LGR_RANGE // rpcLAST should always equal the last code.= + rpcEXPIRED_VALIDATOR_LIST // rpcLAST should always equal the last code. }; /** Codes returned in the `warnings` array of certain RPC commands. 
@@ -145,6 +146,7 @@ enum error_code_i { enum warning_code_i { warnRPC_UNSUPPORTED_MAJORITY = 1001, warnRPC_AMENDMENT_BLOCKED = 1002, + warnRPC_EXPIRED_VALIDATOR_LIST = 1003, }; //------------------------------------------------------------------------------ diff --git a/src/ripple/protocol/impl/ErrorCodes.cpp b/src/ripple/protocol/impl/ErrorCodes.cpp index e16bd6303..454643337 100644 --- a/src/ripple/protocol/impl/ErrorCodes.cpp +++ b/src/ripple/protocol/impl/ErrorCodes.cpp @@ -39,6 +39,7 @@ constexpr static ErrorInfo unorderedErrorInfos[]{ {rpcAMENDMENT_BLOCKED, "amendmentBlocked", "Amendment blocked, need upgrade."}, + {rpcEXPIRED_VALIDATOR_LIST, "unlBlocked", "Validator list expired."}, {rpcATX_DEPRECATED, "deprecated", "Use the new API or specify a ledger range."}, diff --git a/src/ripple/protocol/jss.h b/src/ripple/protocol/jss.h index fe3234569..cf60d3033 100644 --- a/src/ripple/protocol/jss.h +++ b/src/ripple/protocol/jss.h @@ -143,6 +143,8 @@ JSS(bids); // out: Subscribe JSS(binary); // in: AccountTX, LedgerEntry, // AccountTxOld, Tx LedgerData JSS(blob); // out: ValidatorList +JSS(blobs_v2); // out: ValidatorList + // in: UNL JSS(books); // in: Subscribe, Unsubscribe JSS(both); // in: Subscribe, Unsubscribe JSS(both_sides); // in: Subscribe, Unsubscribe @@ -211,6 +213,8 @@ JSS(directory); // in: LedgerEntry JSS(domain); // out: ValidatorInfo, Manifest JSS(drops); // out: TxQ JSS(duration_us); // out: NetworkOPs +JSS(effective); // out: ValidatorList + // in: UNL JSS(enabled); // out: AmendmentTable JSS(engine_result); // out: NetworkOPs, TransactionSign, Submit JSS(engine_result_code); // out: NetworkOPs, TransactionSign, Submit @@ -439,8 +443,10 @@ JSS(random); // out: Random JSS(raw_meta); // out: AcceptedLedgerTx JSS(receive_currencies); // out: AccountCurrencies JSS(reference_level); // out: TxQ +JSS(refresh_interval); // in: UNL JSS(refresh_interval_min); // out: ValidatorSites JSS(regular_seed); // in/out: LedgerEntry +JSS(remaining); // out: 
ValidatorList JSS(remote); // out: Logic.h JSS(request); // RPC JSS(requested); // out: Manifest @@ -471,6 +477,7 @@ JSS(seq); // in: LedgerEntry; // out: NetworkOPs, RPCSub, AccountOffers, // ValidatorList, ValidatorInfo, Manifest JSS(seqNum); // out: LedgerToJson +JSS(sequence); // in: UNL JSS(sequence_count); // out: AccountInfo JSS(server_domain); // out: NetworkOPs JSS(server_state); // out: NetworkOPs diff --git a/src/ripple/rpc/impl/Handler.h b/src/ripple/rpc/impl/Handler.h index 6b6fa71e7..b93d53fa3 100644 --- a/src/ripple/rpc/impl/Handler.h +++ b/src/ripple/rpc/impl/Handler.h @@ -77,6 +77,20 @@ template error_code_i conditionMet(Condition condition_required, T& context) { + if (context.app.getOPs().isAmendmentBlocked() && + (condition_required & NEEDS_CURRENT_LEDGER || + condition_required & NEEDS_CLOSED_LEDGER)) + { + return rpcAMENDMENT_BLOCKED; + } + + if (context.app.getOPs().isUNLBlocked() && + (condition_required & NEEDS_CURRENT_LEDGER || + condition_required & NEEDS_CLOSED_LEDGER)) + { + return rpcEXPIRED_VALIDATOR_LIST; + } + if ((condition_required & NEEDS_NETWORK_CONNECTION) && (context.netOps.getOperatingMode() < OperatingMode::SYNCING)) { @@ -88,13 +102,6 @@ conditionMet(Condition condition_required, T& context) return rpcNOT_SYNCED; } - if (context.app.getOPs().isAmendmentBlocked() && - (condition_required & NEEDS_CURRENT_LEDGER || - condition_required & NEEDS_CLOSED_LEDGER)) - { - return rpcAMENDMENT_BLOCKED; - } - if (!context.app.config().standalone() && condition_required & NEEDS_CURRENT_LEDGER) { diff --git a/src/test/app/ValidatorList_test.cpp b/src/test/app/ValidatorList_test.cpp index 0eb926279..acb978fef 100644 --- a/src/test/app/ValidatorList_test.cpp +++ b/src/test/app/ValidatorList_test.cpp @@ -21,12 +21,15 @@ #include #include #include +#include #include #include #include #include #include #include +#include +#include #include namespace ripple { @@ -126,11 +129,14 @@ private: makeList( std::vector const& validators, std::size_t 
sequence, - std::size_t expiration) + std::size_t validUntil, + boost::optional validFrom = {}) { std::string data = "{\"sequence\":" + std::to_string(sequence) + - ",\"expiration\":" + std::to_string(expiration) + - ",\"validators\":["; + ",\"expiration\":" + std::to_string(validUntil); + if (validFrom) + data += ",\"effective\":" + std::to_string(*validFrom); + data += ",\"validators\":["; for (auto const& val : validators) { @@ -162,6 +168,20 @@ private: return res; } + void + checkResult( + ValidatorList::PublisherListStats const& result, + PublicKey pubKey, + ListDisposition expectedWorst, + ListDisposition expectedBest) + { + BEAST_EXPECT( + result.bestDisposition() > ListDisposition::same_sequence || + (result.publisherKey && *result.publisherKey == pubKey)); + BEAST_EXPECT(result.bestDisposition() == expectedBest); + BEAST_EXPECT(result.worstDisposition() == expectedWorst); + } + void testGenesisQuorum() { @@ -451,9 +471,460 @@ private: } void - testApplyList() + testApplyLists() { testcase("Apply list"); + using namespace std::chrono_literals; + + std::string const siteUri = "testApplyList.test"; + + auto checkAvailable = + [this]( + auto const& trustedKeys, + auto const& hexPublic, + auto const& manifest, + auto const version, + std::vector> const& + expected) { + const auto available = trustedKeys->getAvailable(hexPublic); + + BEAST_EXPECT(!version || available); + if (available) + { + auto const& a = *available; + BEAST_EXPECT(a[jss::public_key] == hexPublic); + BEAST_EXPECT(a[jss::manifest] == manifest); + // Because multiple lists were processed, the version was + // overridden + BEAST_EXPECT(a[jss::version] == version); + if (version == 1) + { + BEAST_EXPECT(expected.size() == 1); + BEAST_EXPECT(a[jss::blob] == expected[0].first); + BEAST_EXPECT(a[jss::signature] == expected[0].second); + BEAST_EXPECT(!a.isMember(jss::blobs_v2)); + } + else if (BEAST_EXPECT(a.isMember(jss::blobs_v2))) + { + BEAST_EXPECT(!a.isMember(jss::blob)); + 
BEAST_EXPECT(!a.isMember(jss::signature)); + auto const& blobs_v2 = a[jss::blobs_v2]; + BEAST_EXPECT( + blobs_v2.isArray() && + blobs_v2.size() == expected.size()); + + for (unsigned int i = 0; i < expected.size(); ++i) + { + BEAST_EXPECT( + blobs_v2[i][jss::blob] == expected[i].first); + BEAST_EXPECT( + blobs_v2[i][jss::signature] == + expected[i].second); + } + } + } + }; + + ManifestCache manifests; + jtx::Env env(*this); + auto& app = env.app(); + auto trustedKeys = std::make_unique( + manifests, + manifests, + env.app().timeKeeper(), + app.config().legacy("database_path"), + env.journal); + + auto expectTrusted = + [this, &trustedKeys](std::vector const& list) { + for (auto const& val : list) + { + BEAST_EXPECT(trustedKeys->listed(val.masterPublic)); + BEAST_EXPECT(trustedKeys->listed(val.signingPublic)); + } + }; + + auto expectUntrusted = + [this, &trustedKeys](std::vector const& list) { + for (auto const& val : list) + { + BEAST_EXPECT(!trustedKeys->listed(val.masterPublic)); + BEAST_EXPECT(!trustedKeys->listed(val.signingPublic)); + } + }; + + auto const publisherSecret = randomSecretKey(); + auto const publisherPublic = + derivePublicKey(KeyType::ed25519, publisherSecret); + const auto hexPublic = + strHex(publisherPublic.begin(), publisherPublic.end()); + auto const pubSigningKeys1 = randomKeyPair(KeyType::secp256k1); + auto const manifest1 = base64_encode(makeManifestString( + publisherPublic, + publisherSecret, + pubSigningKeys1.first, + pubSigningKeys1.second, + 1)); + + std::vector cfgKeys1({strHex(publisherPublic)}); + PublicKey emptyLocalKey; + std::vector emptyCfgKeys; + + BEAST_EXPECT(trustedKeys->load(emptyLocalKey, emptyCfgKeys, cfgKeys1)); + + std::map> const lists = []() { + auto constexpr listSize = 20; + auto constexpr numLists = 9; + std::map> lists; + // 1-based to correspond with the individually named blobs below. 
+ for (auto i = 1; i <= numLists; ++i) + { + auto& list = lists[i]; + list.reserve(listSize); + while (list.size() < listSize) + list.push_back(randomValidator()); + } + return lists; + }(); + + // Attempt an expired list (fail) and a single list (succeed) + env.timeKeeper().set(env.timeKeeper().now() + 1s); + auto const version = 1; + auto const sequence1 = 1; + auto const expiredblob = makeList( + lists.at(1), + sequence1, + env.timeKeeper().now().time_since_epoch().count()); + auto const expiredSig = signList(expiredblob, pubSigningKeys1); + + NetClock::time_point const validUntil = env.timeKeeper().now() + 3600s; + auto const sequence2 = 2; + auto const blob2 = makeList( + lists.at(2), sequence2, validUntil.time_since_epoch().count()); + auto const sig2 = signList(blob2, pubSigningKeys1); + + checkResult( + trustedKeys->applyLists( + manifest1, + version, + {{expiredblob, expiredSig, {}}, {blob2, sig2, {}}}, + siteUri), + publisherPublic, + ListDisposition::expired, + ListDisposition::accepted); + + expectTrusted(lists.at(2)); + + checkAvailable( + trustedKeys, hexPublic, manifest1, version, {{blob2, sig2}}); + + // Do not apply future lists, but process them + auto const version2 = 2; + auto const sequence7 = 7; + auto const effective7 = validUntil - 60s; + auto const expiration7 = effective7 + 3600s; + auto const blob7 = makeList( + lists.at(7), + sequence7, + expiration7.time_since_epoch().count(), + effective7.time_since_epoch().count()); + auto const sig7 = signList(blob7, pubSigningKeys1); + + auto const sequence8 = 8; + auto const effective8 = expiration7 - 60s; + auto const expiration8 = effective8 + 3600s; + auto const blob8 = makeList( + lists.at(8), + sequence8, + expiration8.time_since_epoch().count(), + effective8.time_since_epoch().count()); + auto const sig8 = signList(blob8, pubSigningKeys1); + + checkResult( + trustedKeys->applyLists( + manifest1, + version2, + {{blob7, sig7, {}}, {blob8, sig8, {}}}, + siteUri), + publisherPublic, + 
ListDisposition::pending, + ListDisposition::pending); + + expectUntrusted(lists.at(7)); + expectUntrusted(lists.at(8)); + + // Do not apply out-of-order future list, but process it + auto const sequence6 = 6; + auto const effective6 = effective7 - 60s; + auto const expiration6 = effective6 + 3600s; + auto const blob6 = makeList( + lists.at(6), + sequence6, + expiration6.time_since_epoch().count(), + effective6.time_since_epoch().count()); + auto const sig6 = signList(blob6, pubSigningKeys1); + + // Process future list that is overridden by a later list + auto const sequence6a = 5; + auto const effective6a = effective6 + 60s; + auto const expiration6a = effective6a + 3600s; + auto const blob6a = makeList( + lists.at(5), + sequence6a, + expiration6a.time_since_epoch().count(), + effective6a.time_since_epoch().count()); + auto const sig6a = signList(blob6a, pubSigningKeys1); + + checkResult( + trustedKeys->applyLists( + manifest1, + version, + {{blob6a, sig6a, {}}, {blob6, sig6, {}}}, + siteUri), + publisherPublic, + ListDisposition::pending, + ListDisposition::pending); + + expectUntrusted(lists.at(6)); + expectTrusted(lists.at(2)); + + // Do not apply re-process lists known future sequence numbers + + checkResult( + trustedKeys->applyLists( + manifest1, + version, + {{blob7, sig7, {}}, {blob6, sig6, {}}}, + siteUri), + publisherPublic, + ListDisposition::known_sequence, + ListDisposition::known_sequence); + + expectUntrusted(lists.at(6)); + expectUntrusted(lists.at(7)); + expectTrusted(lists.at(2)); + + // do not use list from untrusted publisher + auto const untrustedManifest = base64_encode(makeManifestString( + randomMasterKey(), + publisherSecret, + pubSigningKeys1.first, + pubSigningKeys1.second, + 1)); + + checkResult( + trustedKeys->applyLists( + untrustedManifest, version, {{blob2, sig2, {}}}, siteUri), + publisherPublic, + ListDisposition::untrusted, + ListDisposition::untrusted); + + // do not use list with unhandled version + auto const badVersion = 666; 
+ checkResult( + trustedKeys->applyLists( + manifest1, badVersion, {{blob2, sig2, {}}}, siteUri), + publisherPublic, + ListDisposition::unsupported_version, + ListDisposition::unsupported_version); + + // apply list with highest sequence number + auto const sequence3 = 3; + auto const blob3 = makeList( + lists.at(3), sequence3, validUntil.time_since_epoch().count()); + auto const sig3 = signList(blob3, pubSigningKeys1); + + checkResult( + trustedKeys->applyLists( + manifest1, version, {{blob3, sig3, {}}}, siteUri), + publisherPublic, + ListDisposition::accepted, + ListDisposition::accepted); + + expectUntrusted(lists.at(1)); + expectUntrusted(lists.at(2)); + expectTrusted(lists.at(3)); + + // Note that blob6a is not present, because it was dropped during + // processing + checkAvailable( + trustedKeys, + hexPublic, + manifest1, + 2, + {{blob3, sig3}, {blob6, sig6}, {blob7, sig7}, {blob8, sig8}}); + + // do not re-apply lists with past or current sequence numbers + checkResult( + trustedKeys->applyLists( + manifest1, + version, + {{blob2, sig2, {}}, {blob3, sig3, {}}}, + siteUri), + publisherPublic, + ListDisposition::stale, + ListDisposition::same_sequence); + + // apply list with new publisher key updated by manifest. 
Also send some + // old lists along with the old manifest + auto const pubSigningKeys2 = randomKeyPair(KeyType::secp256k1); + auto manifest2 = base64_encode(makeManifestString( + publisherPublic, + publisherSecret, + pubSigningKeys2.first, + pubSigningKeys2.second, + 2)); + + auto const sequence4 = 4; + auto const blob4 = makeList( + lists.at(4), sequence4, validUntil.time_since_epoch().count()); + auto const sig4 = signList(blob4, pubSigningKeys2); + + checkResult( + trustedKeys->applyLists( + manifest2, + version, + {{blob2, sig2, manifest1}, + {blob3, sig3, manifest1}, + {blob4, sig4, {}}}, + siteUri), + publisherPublic, + ListDisposition::stale, + ListDisposition::accepted); + + expectUntrusted(lists.at(2)); + expectUntrusted(lists.at(3)); + expectTrusted(lists.at(4)); + + checkAvailable( + trustedKeys, + hexPublic, + manifest2, + 2, + {{blob4, sig4}, {blob6, sig6}, {blob7, sig7}, {blob8, sig8}}); + + auto const sequence5 = 5; + auto const blob5 = makeList( + lists.at(5), sequence5, validUntil.time_since_epoch().count()); + auto const badSig = signList(blob5, pubSigningKeys1); + checkResult( + trustedKeys->applyLists( + manifest1, version, {{blob5, badSig, {}}}, siteUri), + publisherPublic, + ListDisposition::invalid, + ListDisposition::invalid); + + expectUntrusted(lists.at(2)); + expectUntrusted(lists.at(3)); + expectTrusted(lists.at(4)); + expectUntrusted(lists.at(5)); + + // Reprocess the pending list, but the signature is no longer valid + checkResult( + trustedKeys->applyLists( + manifest1, + version, + {{blob7, sig7, {}}, {blob8, sig8, {}}}, + siteUri), + publisherPublic, + ListDisposition::invalid, + ListDisposition::invalid); + + expectTrusted(lists.at(4)); + expectUntrusted(lists.at(7)); + expectUntrusted(lists.at(8)); + + // Automatically rotate the first pending already processed list using + // updateTrusted. 
Note that the timekeeper is NOT moved, so the close + // time will be ahead of the test's wall clock + trustedKeys->updateTrusted( + {}, + effective6 + 1s, + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); + + expectUntrusted(lists.at(3)); + expectTrusted(lists.at(6)); + + checkAvailable( + trustedKeys, + hexPublic, + manifest2, + 2, + {{blob6, sig6}, {blob7, sig7}, {blob8, sig8}}); + + // Automatically rotate the LAST pending list using updateTrusted, + // bypassing blob7. Note that the timekeeper IS moved, so the provided + // close time will be behind the test's wall clock, and thus the wall + // clock is used. + env.timeKeeper().set(effective8); + trustedKeys->updateTrusted( + {}, + effective8 + 1s, + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); + + expectUntrusted(lists.at(6)); + expectUntrusted(lists.at(7)); + expectTrusted(lists.at(8)); + + checkAvailable(trustedKeys, hexPublic, manifest2, 2, {{blob8, sig8}}); + + // resign the pending list with new key and validate it, but it's + // already valid Also try reprocessing the pending list with an + // explicit manifest + // - it is still invalid + auto const sig8_2 = signList(blob8, pubSigningKeys2); + + checkResult( + trustedKeys->applyLists( + manifest2, + version, + {{blob8, sig8, manifest1}, {blob8, sig8_2, {}}}, + siteUri), + publisherPublic, + ListDisposition::invalid, + ListDisposition::same_sequence); + + expectTrusted(lists.at(8)); + + checkAvailable(trustedKeys, hexPublic, manifest2, 2, {{blob8, sig8}}); + + // do not apply list with revoked publisher key + // applied list is removed due to revoked publisher key + auto const signingKeysMax = randomKeyPair(KeyType::secp256k1); + auto maxManifest = base64_encode( + makeRevocationString(publisherPublic, publisherSecret)); + + auto const sequence9 = 9; + auto const blob9 = makeList( + lists.at(9), sequence9, validUntil.time_since_epoch().count()); + auto const sig9 = signList(blob9, signingKeysMax); + + 
checkResult( + trustedKeys->applyLists( + maxManifest, version, {{blob9, sig9, {}}}, siteUri), + publisherPublic, + ListDisposition::untrusted, + ListDisposition::untrusted); + + BEAST_EXPECT(!trustedKeys->trustedPublisher(publisherPublic)); + for (auto const& [num, list] : lists) + { + (void)num; + expectUntrusted(list); + } + + checkAvailable(trustedKeys, hexPublic, manifest2, 0, {}); + } + + void + testGetAvailable() + { + testcase("GetAvailable"); + using namespace std::chrono_literals; std::string const siteUri = "testApplyList.test"; @@ -470,8 +941,10 @@ private: auto const publisherSecret = randomSecretKey(); auto const publisherPublic = derivePublicKey(KeyType::ed25519, publisherSecret); + const auto hexPublic = + strHex(publisherPublic.begin(), publisherPublic.end()); auto const pubSigningKeys1 = randomKeyPair(KeyType::secp256k1); - auto const manifest1 = base64_encode(makeManifestString( + auto const manifest = base64_encode(makeManifestString( publisherPublic, publisherSecret, pubSigningKeys1.first, @@ -484,168 +957,103 @@ private: BEAST_EXPECT(trustedKeys->load(emptyLocalKey, emptyCfgKeys, cfgKeys1)); - auto constexpr listSize = 20; - std::vector list1; - list1.reserve(listSize); - while (list1.size() < listSize) - list1.push_back(randomValidator()); + std::vector const list = []() { + auto constexpr listSize = 20; + std::vector list; + list.reserve(listSize); + while (list.size() < listSize) + list.push_back(randomValidator()); + return list; + }(); - std::vector list2; - list2.reserve(listSize); - while (list2.size() < listSize) - list2.push_back(randomValidator()); + // Process a list + env.timeKeeper().set(env.timeKeeper().now() + 1s); + NetClock::time_point const validUntil = env.timeKeeper().now() + 3600s; + auto const blob = + makeList(list, 1, validUntil.time_since_epoch().count()); + auto const sig = signList(blob, pubSigningKeys1); - // do not apply expired list - auto const version = 1; - auto const sequence = 1; - auto const expiredblob = 
makeList( - list1, sequence, env.timeKeeper().now().time_since_epoch().count()); - auto const expiredSig = signList(expiredblob, pubSigningKeys1); - - BEAST_EXPECT( - ListDisposition::stale == - trustedKeys - ->applyList( - manifest1, expiredblob, expiredSig, version, siteUri) - .disposition); - - // apply single list - using namespace std::chrono_literals; - NetClock::time_point const expiration = env.timeKeeper().now() + 3600s; - auto const blob1 = - makeList(list1, sequence, expiration.time_since_epoch().count()); - auto const sig1 = signList(blob1, pubSigningKeys1); - - BEAST_EXPECT( - ListDisposition::accepted == - trustedKeys->applyList(manifest1, blob1, sig1, version, siteUri) - .disposition); - - for (auto const& val : list1) { - BEAST_EXPECT(trustedKeys->listed(val.masterPublic)); - BEAST_EXPECT(trustedKeys->listed(val.signingPublic)); + // list unavailable + auto const available = trustedKeys->getAvailable(hexPublic); + BEAST_EXPECT(!available); } - // do not use list from untrusted publisher - auto const untrustedManifest = base64_encode(makeManifestString( - randomMasterKey(), - publisherSecret, - pubSigningKeys1.first, - pubSigningKeys1.second, - 1)); - BEAST_EXPECT( - ListDisposition::untrusted == - trustedKeys - ->applyList(untrustedManifest, blob1, sig1, version, siteUri) - .disposition); + trustedKeys->applyLists(manifest, 1, {{blob, sig, {}}}, siteUri) + .bestDisposition() == ListDisposition::accepted); - // do not use list with unhandled version - auto const badVersion = 666; - BEAST_EXPECT( - ListDisposition::unsupported_version == - trustedKeys->applyList(manifest1, blob1, sig1, badVersion, siteUri) - .disposition); - - // apply list with highest sequence number - auto const sequence2 = 2; - auto const blob2 = - makeList(list2, sequence2, expiration.time_since_epoch().count()); - auto const sig2 = signList(blob2, pubSigningKeys1); - - BEAST_EXPECT( - ListDisposition::accepted == - trustedKeys->applyList(manifest1, blob2, sig2, version, siteUri) 
- .disposition); - - for (auto const& val : list1) { - BEAST_EXPECT(!trustedKeys->listed(val.masterPublic)); - BEAST_EXPECT(!trustedKeys->listed(val.signingPublic)); + // invalid public key + auto const available = + trustedKeys->getAvailable(hexPublic + "invalid", 1); + BEAST_EXPECT(!available); } - for (auto const& val : list2) { - BEAST_EXPECT(trustedKeys->listed(val.masterPublic)); - BEAST_EXPECT(trustedKeys->listed(val.signingPublic)); + // unknown public key + auto const badSecret = randomSecretKey(); + auto const badPublic = derivePublicKey(KeyType::ed25519, badSecret); + const auto hexBad = strHex(badPublic.begin(), badPublic.end()); + + auto const available = trustedKeys->getAvailable(hexBad, 1); + BEAST_EXPECT(!available); + } + { + // bad version 0 + auto const available = trustedKeys->getAvailable(hexPublic, 0); + if (BEAST_EXPECT(available)) + { + auto const& a = *available; + BEAST_EXPECT(!a); + } + } + { + // bad version 3 + auto const available = trustedKeys->getAvailable(hexPublic, 3); + if (BEAST_EXPECT(available)) + { + auto const& a = *available; + BEAST_EXPECT(!a); + } + } + { + // version 1 + auto const available = trustedKeys->getAvailable(hexPublic, 1); + if (BEAST_EXPECT(available)) + { + auto const& a = *available; + BEAST_EXPECT(a[jss::public_key] == hexPublic); + BEAST_EXPECT(a[jss::manifest] == manifest); + BEAST_EXPECT(a[jss::version] == 1); + + BEAST_EXPECT(a[jss::blob] == blob); + BEAST_EXPECT(a[jss::signature] == sig); + BEAST_EXPECT(!a.isMember(jss::blobs_v2)); + } } - const auto hexPublic = - strHex(publisherPublic.begin(), publisherPublic.end()); - - const auto available = trustedKeys->getAvailable(hexPublic); - - if (BEAST_EXPECT(available)) { - auto const& a = *available; - BEAST_EXPECT(a[jss::public_key] == hexPublic); - BEAST_EXPECT(a[jss::blob] == blob2); - BEAST_EXPECT(a[jss::manifest] == manifest1); - BEAST_EXPECT(a[jss::version] == version); - BEAST_EXPECT(a[jss::signature] == sig2); - } + // version 2 + auto const 
available = trustedKeys->getAvailable(hexPublic, 2); + if (BEAST_EXPECT(available)) + { + auto const& a = *available; + BEAST_EXPECT(a[jss::public_key] == hexPublic); + BEAST_EXPECT(a[jss::manifest] == manifest); + BEAST_EXPECT(a[jss::version] == 2); - // do not re-apply lists with past or current sequence numbers - BEAST_EXPECT( - ListDisposition::stale == - trustedKeys->applyList(manifest1, blob1, sig1, version, siteUri) - .disposition); + if (BEAST_EXPECT(a.isMember(jss::blobs_v2))) + { + BEAST_EXPECT(!a.isMember(jss::blob)); + BEAST_EXPECT(!a.isMember(jss::signature)); + auto const& blobs_v2 = a[jss::blobs_v2]; + BEAST_EXPECT(blobs_v2.isArray() && blobs_v2.size() == 1); - BEAST_EXPECT( - ListDisposition::same_sequence == - trustedKeys->applyList(manifest1, blob2, sig2, version, siteUri) - .disposition); - - // apply list with new publisher key updated by manifest - auto const pubSigningKeys2 = randomKeyPair(KeyType::secp256k1); - auto manifest2 = base64_encode(makeManifestString( - publisherPublic, - publisherSecret, - pubSigningKeys2.first, - pubSigningKeys2.second, - 2)); - - auto const sequence3 = 3; - auto const blob3 = - makeList(list1, sequence3, expiration.time_since_epoch().count()); - auto const sig3 = signList(blob3, pubSigningKeys2); - - BEAST_EXPECT( - ListDisposition::accepted == - trustedKeys->applyList(manifest2, blob3, sig3, version, siteUri) - .disposition); - - auto const sequence4 = 4; - auto const blob4 = - makeList(list1, sequence4, expiration.time_since_epoch().count()); - auto const badSig = signList(blob4, pubSigningKeys1); - BEAST_EXPECT( - ListDisposition::invalid == - trustedKeys->applyList(manifest1, blob4, badSig, version, siteUri) - .disposition); - - // do not apply list with revoked publisher key - // applied list is removed due to revoked publisher key - auto const signingKeysMax = randomKeyPair(KeyType::secp256k1); - auto maxManifest = base64_encode( - makeRevocationString(publisherPublic, publisherSecret)); - - auto const 
sequence5 = 5; - auto const blob5 = - makeList(list1, sequence5, expiration.time_since_epoch().count()); - auto const sig5 = signList(blob5, signingKeysMax); - - BEAST_EXPECT( - ListDisposition::untrusted == - trustedKeys->applyList(maxManifest, blob5, sig5, version, siteUri) - .disposition); - - BEAST_EXPECT(!trustedKeys->trustedPublisher(publisherPublic)); - for (auto const& val : list1) - { - BEAST_EXPECT(!trustedKeys->listed(val.masterPublic)); - BEAST_EXPECT(!trustedKeys->listed(val.signingPublic)); + BEAST_EXPECT(blobs_v2[0u][jss::blob] == blob); + BEAST_EXPECT(blobs_v2[0u][jss::signature] == sig); + } + } } } @@ -691,8 +1099,12 @@ private: // updateTrusted should make all configured validators trusted // even if they are not active/seen - TrustChanges changes = - trustedKeysOuter->updateTrusted(activeValidatorsOuter); + TrustChanges changes = trustedKeysOuter->updateTrusted( + activeValidatorsOuter, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); for (auto const& val : unseenValidators) activeValidatorsOuter.emplace(val); @@ -713,7 +1125,12 @@ private: fail(); } - changes = trustedKeysOuter->updateTrusted(activeValidatorsOuter); + changes = trustedKeysOuter->updateTrusted( + activeValidatorsOuter, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); BEAST_EXPECT(changes.added.empty()); BEAST_EXPECT(changes.removed.empty()); BEAST_EXPECT( @@ -736,8 +1153,12 @@ private: activeValidatorsOuter.emplace(calcNodeID(masterPublic)); // Should not trust ephemeral signing key if there is no manifest - TrustChanges changes = - trustedKeysOuter->updateTrusted(activeValidatorsOuter); + TrustChanges changes = trustedKeysOuter->updateTrusted( + activeValidatorsOuter, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); BEAST_EXPECT(changes.added == asNodeIDs({masterPublic})); BEAST_EXPECT(changes.removed.empty()); BEAST_EXPECT( 
@@ -802,7 +1223,12 @@ private: BEAST_EXPECT(trustedKeysOuter->listed(masterPublic)); BEAST_EXPECT(trustedKeysOuter->trusted(masterPublic)); - changes = trustedKeysOuter->updateTrusted(activeValidatorsOuter); + changes = trustedKeysOuter->updateTrusted( + activeValidatorsOuter, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); BEAST_EXPECT(changes.removed == asNodeIDs({masterPublic})); BEAST_EXPECT(changes.added.empty()); BEAST_EXPECT( @@ -835,8 +1261,12 @@ private: BEAST_EXPECT(trustedKeys->load( emptyLocalKeyOuter, emptyCfgKeys, cfgPublishers)); - TrustChanges changes = - trustedKeys->updateTrusted(activeValidatorsOuter); + TrustChanges changes = trustedKeys->updateTrusted( + activeValidatorsOuter, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); BEAST_EXPECT(changes.removed.empty()); BEAST_EXPECT(changes.added.empty()); BEAST_EXPECT( @@ -876,14 +1306,24 @@ private: BEAST_EXPECT(trustedKeys->load( emptyLocalKeyOuter, cfgKeys, cfgPublishersOuter)); - TrustChanges changes = trustedKeys->updateTrusted(activeValidators); + TrustChanges changes = trustedKeys->updateTrusted( + activeValidators, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); BEAST_EXPECT(changes.removed.empty()); BEAST_EXPECT(changes.added == expectedTrusted); BEAST_EXPECT(trustedKeys->quorum() == minQuorum); // Use normal quorum when seen validators >= quorum activeValidators.emplace(toBeSeen); - changes = trustedKeys->updateTrusted(activeValidators); + changes = trustedKeys->updateTrusted( + activeValidators, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); BEAST_EXPECT(changes.removed.empty()); BEAST_EXPECT(changes.added.empty()); BEAST_EXPECT(trustedKeys->quorum() == std::ceil(n * 0.8f)); @@ -921,18 +1361,24 @@ private: auto const version = 1; auto const sequence = 1; using namespace 
std::chrono_literals; - NetClock::time_point const expiration = + NetClock::time_point const validUntil = env.timeKeeper().now() + 60s; auto const blob = - makeList(list, sequence, expiration.time_since_epoch().count()); + makeList(list, sequence, validUntil.time_since_epoch().count()); auto const sig = signList(blob, pubSigningKeys); BEAST_EXPECT( ListDisposition::accepted == - trustedKeys->applyList(manifest, blob, sig, version, siteUri) - .disposition); + trustedKeys + ->applyLists(manifest, version, {{blob, sig, {}}}, siteUri) + .bestDisposition()); - TrustChanges changes = trustedKeys->updateTrusted(activeValidators); + TrustChanges changes = trustedKeys->updateTrusted( + activeValidators, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); BEAST_EXPECT(changes.removed.empty()); BEAST_EXPECT(changes.added == activeValidators); for (Validator const& val : list) @@ -942,8 +1388,13 @@ private: } BEAST_EXPECT(trustedKeys->quorum() == 2); - env.timeKeeper().set(expiration); - changes = trustedKeys->updateTrusted(activeValidators); + env.timeKeeper().set(validUntil); + changes = trustedKeys->updateTrusted( + activeValidators, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); BEAST_EXPECT(changes.removed == activeValidators); BEAST_EXPECT(changes.added.empty()); BEAST_EXPECT(!trustedKeys->trusted(list[0].masterPublic)); @@ -964,10 +1415,17 @@ private: BEAST_EXPECT( ListDisposition::accepted == - trustedKeys->applyList(manifest, blob2, sig2, version, siteUri) - .disposition); + trustedKeys + ->applyLists( + manifest, version, {{blob2, sig2, {}}}, siteUri) + .bestDisposition()); - changes = trustedKeys->updateTrusted(activeValidators); + changes = trustedKeys->updateTrusted( + activeValidators, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); BEAST_EXPECT(changes.removed.empty()); BEAST_EXPECT( changes.added == @@ 
-1005,8 +1463,12 @@ private: activeKeys.emplace(valKey); BEAST_EXPECT(trustedKeys->load( emptyLocalKeyOuter, cfgKeys, cfgPublishers)); - TrustChanges changes = - trustedKeys->updateTrusted(activeValidators); + TrustChanges changes = trustedKeys->updateTrusted( + activeValidators, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); BEAST_EXPECT(changes.removed.empty()); BEAST_EXPECT(changes.added == asNodeIDs({valKey})); BEAST_EXPECT( @@ -1041,8 +1503,12 @@ private: BEAST_EXPECT( trustedKeys->load(localKey, cfgKeys, cfgPublishers)); - TrustChanges changes = - trustedKeys->updateTrusted(activeValidators); + TrustChanges changes = trustedKeys->updateTrusted( + activeValidators, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); BEAST_EXPECT(changes.removed.empty()); if (cfgKeys.size() > 2) BEAST_EXPECT(changes.added == asNodeIDs({valKey})); @@ -1105,24 +1571,30 @@ private: auto const version = 1; auto const sequence = 1; using namespace std::chrono_literals; - NetClock::time_point const expiration = + NetClock::time_point const validUntil = env.timeKeeper().now() + 3600s; auto const blob = makeList( - valKeys, sequence, expiration.time_since_epoch().count()); + valKeys, sequence, validUntil.time_since_epoch().count()); auto const sig = signList(blob, pubSigningKeys); BEAST_EXPECT( ListDisposition::accepted == trustedKeys - ->applyList(manifest, blob, sig, version, siteUri) - .disposition); + ->applyLists( + manifest, version, {{blob, sig, {}}}, siteUri) + .bestDisposition()); }; // Apply multiple published lists for (auto i = 0; i < 3; ++i) addPublishedList(); - TrustChanges changes = trustedKeys->updateTrusted(activeValidators); + TrustChanges changes = trustedKeys->updateTrusted( + activeValidators, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); BEAST_EXPECT( trustedKeys->quorum() == std::ceil(valKeys.size() * 
0.8f)); @@ -1192,11 +1664,11 @@ private: // Store prepared list data to control when it is applied struct PreparedList { + PublicKey publisherPublic; std::string manifest; - std::string blob; - std::string sig; + std::vector blobs; int version; - NetClock::time_point expiration; + std::vector expirations; }; using namespace std::chrono_literals; @@ -1220,17 +1692,32 @@ private: BEAST_EXPECT(trustedKeys->load( emptyLocalKey, emptyCfgKeys, cfgPublishers)); - auto const version = 1; - auto const sequence = 1; - NetClock::time_point const expiration = - env.timeKeeper().now() + 3600s; - auto const blob = makeList( + auto const version = 2; + auto const sequence1 = 1; + NetClock::time_point const expiration1 = + env.timeKeeper().now() + 1800s; + auto const blob1 = makeList( validators, - sequence, - expiration.time_since_epoch().count()); - auto const sig = signList(blob, pubSigningKeys); + sequence1, + expiration1.time_since_epoch().count()); + auto const sig1 = signList(blob1, pubSigningKeys); - return PreparedList{manifest, blob, sig, version, expiration}; + NetClock::time_point const effective2 = expiration1 - 300s; + NetClock::time_point const expiration2 = effective2 + 1800s; + auto const sequence2 = 2; + auto const blob2 = makeList( + validators, + sequence2, + expiration2.time_since_epoch().count(), + effective2.time_since_epoch().count()); + auto const sig2 = signList(blob2, pubSigningKeys); + + return PreparedList{ + publisherPublic, + manifest, + {{blob1, sig1, {}}, {blob2, sig2, {}}}, + version, + {expiration1, expiration2}}; }; // Configure two publishers and prepare 2 lists @@ -1242,44 +1729,62 @@ private: BEAST_EXPECT(trustedKeys->expires() == boost::none); // Apply first list - BEAST_EXPECT( - ListDisposition::accepted == - trustedKeys - ->applyList( - prep1.manifest, - prep1.blob, - prep1.sig, - prep1.version, - siteUri) - .disposition); + checkResult( + trustedKeys->applyLists( + prep1.manifest, prep1.version, prep1.blobs, siteUri), + 
prep1.publisherPublic, + ListDisposition::pending, + ListDisposition::accepted); - // One list still hasn't published, so expiration is still unknown + // One list still hasn't published, so expiration is still + // unknown BEAST_EXPECT(trustedKeys->expires() == boost::none); // Apply second list - BEAST_EXPECT( - ListDisposition::accepted == - trustedKeys - ->applyList( - prep2.manifest, - prep2.blob, - prep2.sig, - prep2.version, - siteUri) - .disposition); - + checkResult( + trustedKeys->applyLists( + prep2.manifest, prep2.version, prep2.blobs, siteUri), + prep2.publisherPublic, + ListDisposition::pending, + ListDisposition::accepted); // We now have loaded both lists, so expiration is known BEAST_EXPECT( trustedKeys->expires() && - trustedKeys->expires().get() == prep1.expiration); + trustedKeys->expires().get() == prep1.expirations.back()); - // Advance past the first list's expiration, but it remains the - // earliest expiration - env.timeKeeper().set(prep1.expiration + 1s); - trustedKeys->updateTrusted(activeValidators); - BEAST_EXPECT( - trustedKeys->expires() && - trustedKeys->expires().get() == prep1.expiration); + // Advance past the first list's LAST validFrom date. 
It remains + // the earliest validUntil, while rotating in the second list + { + env.timeKeeper().set(prep1.expirations.front() - 1s); + auto changes = trustedKeys->updateTrusted( + activeValidators, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); + BEAST_EXPECT( + trustedKeys->expires() && + trustedKeys->expires().get() == prep1.expirations.back()); + BEAST_EXPECT(!changes.added.empty()); + BEAST_EXPECT(changes.removed.empty()); + } + + // Advance past the first list's LAST validUntil, but it remains + // the earliest validUntil, while being invalidated + { + env.timeKeeper().set(prep1.expirations.back() + 1s); + auto changes = trustedKeys->updateTrusted( + activeValidators, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); + BEAST_EXPECT( + trustedKeys->expires() && + trustedKeys->expires().get() == prep1.expirations.back()); + BEAST_EXPECT(changes.added.empty()); + BEAST_EXPECT(changes.removed.empty()); + } } } @@ -1315,7 +1820,12 @@ private: } if (trustedKeys->load(emptyLocalKey, cfgKeys, cfgPublishers)) { - trustedKeys->updateTrusted(activeValidators); + trustedKeys->updateTrusted( + activeValidators, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); if (trustedKeys->quorum() == std::ceil(cfgKeys.size() * 0.8f)) return trustedKeys; } @@ -1359,7 +1869,12 @@ private: ++it; } validators->setNegativeUNL(nUnl); - validators->updateTrusted(activeValidators); + validators->updateTrusted( + activeValidators, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); BEAST_EXPECT( validators->quorum() == static_cast(std::ceil( @@ -1399,7 +1914,12 @@ private: if (nUnl.find(n) == nUnl.end()) return false; } - validators->updateTrusted(activeValidators); + validators->updateTrusted( + activeValidators, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + 
env.app().getHashRouter()); return validators->quorum() == quorum; } return false; @@ -1411,7 +1931,8 @@ private: } { - // nUNL overlap: |nUNL - UNL| = 5, with nUNL size: 18 + // nUNL overlap: |nUNL - UNL| = 5, with nUNL size: + // 18 auto nUnl = validators->getNegativeUNL(); BEAST_EXPECT(nUnl.size() == 12); std::size_t ss = 33; @@ -1424,7 +1945,12 @@ private: nUnl.emplace(s); } validators->setNegativeUNL(nUnl); - validators->updateTrusted(activeValidators); + validators->updateTrusted( + activeValidators, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); BEAST_EXPECT(validators->quorum() == 39); } } @@ -1446,7 +1972,12 @@ private: activeValidators.insert(calcNodeID(*it)); ++it; } - validators->updateTrusted(activeValidators); + validators->updateTrusted( + activeValidators, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); BEAST_EXPECT(validators->quorum() == 48); hash_set nUnl; it = unl.begin(); @@ -1456,24 +1987,441 @@ private: ++it; } validators->setNegativeUNL(nUnl); - validators->updateTrusted(activeValidators); + validators->updateTrusted( + activeValidators, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); BEAST_EXPECT(validators->quorum() == 30); } } } + void + testSha512Hash() + { + testcase("Sha512 hashing"); + // Tests that ValidatorList hash_append helpers with a single blob + // returns the same result as ripple::Sha512Half used by the + // TMValidatorList protocol message handler + std::string const manifest = "This is not really a manifest"; + std::string const blob = "This is not really a blob"; + std::string const signature = "This is not really a signature"; + std::uint32_t const version = 1; + + auto const global = sha512Half(manifest, blob, signature, version); + BEAST_EXPECT(!!global); + + std::vector blobVector(1); + blobVector[0].blob = blob; + blobVector[0].signature = signature; + 
BEAST_EXPECT(global == sha512Half(manifest, blobVector, version)); + BEAST_EXPECT(global != sha512Half(signature, blobVector, version)); + + { + std::map blobMap{ + {99, blobVector[0]}}; + BEAST_EXPECT(global == sha512Half(manifest, blobMap, version)); + BEAST_EXPECT(global != sha512Half(blob, blobMap, version)); + } + + { + protocol::TMValidatorList msg1; + msg1.set_manifest(manifest); + msg1.set_blob(blob); + msg1.set_signature(signature); + msg1.set_version(version); + BEAST_EXPECT(global == sha512Half(msg1)); + msg1.set_signature(blob); + BEAST_EXPECT(global != sha512Half(msg1)); + } + + { + protocol::TMValidatorListCollection msg2; + msg2.set_manifest(manifest); + msg2.set_version(version); + auto& bi = *msg2.add_blobs(); + bi.set_blob(blob); + bi.set_signature(signature); + BEAST_EXPECT(global == sha512Half(msg2)); + bi.set_manifest(manifest); + BEAST_EXPECT(global != sha512Half(msg2)); + } + } + + void + testBuildMessages() + { + testcase("Build and split messages"); + + std::uint32_t const manifestCutoff = 7; + auto extractHeader = [this](Message& message) { + auto const& buffer = + message.getBuffer(compression::Compressed::Off); + + boost::beast::multi_buffer buffers; + + // simulate multi-buffer + auto start = buffer.begin(); + auto end = buffer.end(); + std::vector slice(start, end); + buffers.commit(boost::asio::buffer_copy( + buffers.prepare(slice.size()), boost::asio::buffer(slice))); + + boost::system::error_code ec; + auto header = + detail::parseMessageHeader(ec, buffers.data(), buffers.size()); + BEAST_EXPECT(!ec); + return std::make_pair(header, buffers); + }; + auto extractProtocolMessage1 = [this, + &extractHeader](Message& message) { + auto [header, buffers] = extractHeader(message); + if (BEAST_EXPECT(header) && + BEAST_EXPECT(header->message_type == protocol::mtVALIDATORLIST)) + { + auto const msg = + detail::parseMessageContent( + *header, buffers.data()); + BEAST_EXPECT(msg); + return msg; + } + return std::shared_ptr(); + }; + auto 
extractProtocolMessage2 = [this, + &extractHeader](Message& message) { + auto [header, buffers] = extractHeader(message); + if (BEAST_EXPECT(header) && + BEAST_EXPECT( + header->message_type == + protocol::mtVALIDATORLISTCOLLECTION)) + { + auto const msg = detail::parseMessageContent< + protocol::TMValidatorListCollection>( + *header, buffers.data()); + BEAST_EXPECT(msg); + return msg; + } + return std::shared_ptr(); + }; + auto verifyMessage = + [this, + manifestCutoff, + &extractProtocolMessage1, + &extractProtocolMessage2]( + auto const version, + auto const& manifest, + auto const& blobInfos, + auto const& messages, + std::vector>> + expectedInfo) { + BEAST_EXPECT(messages.size() == expectedInfo.size()); + auto msgIter = expectedInfo.begin(); + for (auto const& messageWithHash : messages) + { + if (!BEAST_EXPECT(msgIter != expectedInfo.end())) + break; + if (!BEAST_EXPECT(messageWithHash.message)) + continue; + auto const& expectedSeqs = msgIter->second; + auto seqIter = expectedSeqs.begin(); + auto const size = + messageWithHash.message + ->getBuffer(compression::Compressed::Off) + .size(); + // This size is arbitrary, but shouldn't change + BEAST_EXPECT(size == msgIter->first); + if (expectedSeqs.size() == 1) + { + auto const msg = + extractProtocolMessage1(*messageWithHash.message); + auto const expectedVersion = 1; + if (BEAST_EXPECT(msg)) + { + BEAST_EXPECT(msg->version() == expectedVersion); + if (!BEAST_EXPECT(seqIter != expectedSeqs.end())) + continue; + auto const& expectedBlob = blobInfos.at(*seqIter); + BEAST_EXPECT( + (*seqIter < manifestCutoff) == + !!expectedBlob.manifest); + auto const expectedManifest = + *seqIter < manifestCutoff && + expectedBlob.manifest + ? 
*expectedBlob.manifest + : manifest; + BEAST_EXPECT(msg->manifest() == expectedManifest); + BEAST_EXPECT(msg->blob() == expectedBlob.blob); + BEAST_EXPECT( + msg->signature() == expectedBlob.signature); + ++seqIter; + BEAST_EXPECT(seqIter == expectedSeqs.end()); + + BEAST_EXPECT( + messageWithHash.hash == + sha512Half( + expectedManifest, + expectedBlob.blob, + expectedBlob.signature, + expectedVersion)); + } + } + else + { + std::vector hashingBlobs; + hashingBlobs.reserve(msgIter->second.size()); + + auto const msg = + extractProtocolMessage2(*messageWithHash.message); + if (BEAST_EXPECT(msg)) + { + BEAST_EXPECT(msg->version() == version); + BEAST_EXPECT(msg->manifest() == manifest); + for (auto const& blobInfo : msg->blobs()) + { + if (!BEAST_EXPECT( + seqIter != expectedSeqs.end())) + break; + auto const& expectedBlob = + blobInfos.at(*seqIter); + hashingBlobs.push_back(expectedBlob); + BEAST_EXPECT( + blobInfo.has_manifest() == + !!expectedBlob.manifest); + BEAST_EXPECT( + blobInfo.has_manifest() == + (*seqIter < manifestCutoff)); + + if (*seqIter < manifestCutoff) + BEAST_EXPECT( + blobInfo.manifest() == + *expectedBlob.manifest); + BEAST_EXPECT( + blobInfo.blob() == expectedBlob.blob); + BEAST_EXPECT( + blobInfo.signature() == + expectedBlob.signature); + ++seqIter; + } + BEAST_EXPECT(seqIter == expectedSeqs.end()); + } + BEAST_EXPECT( + messageWithHash.hash == + sha512Half(manifest, hashingBlobs, version)); + } + ++msgIter; + } + BEAST_EXPECT(msgIter == expectedInfo.end()); + }; + auto verifyBuildMessages = + [this]( + std::pair const& result, + std::size_t expectedSequence, + std::size_t expectedSize) { + BEAST_EXPECT(result.first == expectedSequence); + BEAST_EXPECT(result.second == expectedSize); + }; + + std::string const manifest = "This is not a manifest"; + std::uint32_t const version = 2; + // Mutable so items can be removed in later tests. 
+ auto const blobInfos = [manifestCutoff = manifestCutoff]() { + std::map bis; + + for (auto seq : {5, 6, 7, 10, 12}) + { + auto& b = bis[seq]; + std::stringstream s; + s << "This is not a blob with sequence " << seq; + b.blob = s.str(); + s.str(std::string()); + s << "This is not a signature for sequence " << seq; + b.signature = s.str(); + if (seq < manifestCutoff) + { + // add a manifest for the "early" blobs + s.str(std::string()); + s << "This is not manifest " << seq; + b.manifest = s.str(); + } + } + return bis; + }(); + auto const maxSequence = blobInfos.rbegin()->first; + BEAST_EXPECT(maxSequence == 12); + + std::vector messages; + + // Version 1 + + // This peer has a VL ahead of our "current" + verifyBuildMessages( + ValidatorList::buildValidatorListMessages( + 1, 8, maxSequence, version, manifest, blobInfos, messages), + 0, + 0); + BEAST_EXPECT(messages.size() == 0); + + // Don't repeat the work if messages is populated, even though the + // peerSequence provided indicates it should. Note that this + // situation is contrived for this test and should never happen in + // real code. 
+ messages.emplace_back(); + verifyBuildMessages( + ValidatorList::buildValidatorListMessages( + 1, 3, maxSequence, version, manifest, blobInfos, messages), + 5, + 0); + BEAST_EXPECT(messages.size() == 1 && !messages.front().message); + + // Generate a version 1 message + messages.clear(); + verifyBuildMessages( + ValidatorList::buildValidatorListMessages( + 1, 3, maxSequence, version, manifest, blobInfos, messages), + 5, + 1); + if (BEAST_EXPECT(messages.size() == 1) && + BEAST_EXPECT(messages.front().message)) + { + auto const& messageWithHash = messages.front(); + auto const msg = extractProtocolMessage1(*messageWithHash.message); + auto const size = + messageWithHash.message->getBuffer(compression::Compressed::Off) + .size(); + // This size is arbitrary, but shouldn't change + BEAST_EXPECT(size == 108); + auto const& expected = blobInfos.at(5); + if (BEAST_EXPECT(msg)) + { + BEAST_EXPECT(msg->version() == 1); + BEAST_EXPECT(msg->manifest() == *expected.manifest); + BEAST_EXPECT(msg->blob() == expected.blob); + BEAST_EXPECT(msg->signature() == expected.signature); + } + BEAST_EXPECT( + messageWithHash.hash == + sha512Half( + *expected.manifest, expected.blob, expected.signature, 1)); + } + + // Version 2 + + messages.clear(); + + // This peer has a VL ahead of us. + verifyBuildMessages( + ValidatorList::buildValidatorListMessages( + 2, + maxSequence * 2, + maxSequence, + version, + manifest, + blobInfos, + messages), + 0, + 0); + BEAST_EXPECT(messages.size() == 0); + + // Don't repeat the work if messages is populated, even though the + // peerSequence provided indicates it should. Note that this + // situation is contrived for this test and should never happen in + // real code. + messages.emplace_back(); + verifyBuildMessages( + ValidatorList::buildValidatorListMessages( + 2, 3, maxSequence, version, manifest, blobInfos, messages), + maxSequence, + 0); + BEAST_EXPECT(messages.size() == 1 && !messages.front().message); + + // Generate a version 2 message. 
Don't send the current + messages.clear(); + verifyBuildMessages( + ValidatorList::buildValidatorListMessages( + 2, 5, maxSequence, version, manifest, blobInfos, messages), + maxSequence, + 4); + verifyMessage( + version, manifest, blobInfos, messages, {{372, {6, 7, 10, 12}}}); + + // Test message splitting on size limits. + + // Set a limit that should give two messages + messages.clear(); + verifyBuildMessages( + ValidatorList::buildValidatorListMessages( + 2, 5, maxSequence, version, manifest, blobInfos, messages, 300), + maxSequence, + 4); + verifyMessage( + version, + manifest, + blobInfos, + messages, + {{212, {6, 7}}, {192, {10, 12}}}); + + // Set a limit between the size of the two earlier messages so one + // will split and the other won't + messages.clear(); + verifyBuildMessages( + ValidatorList::buildValidatorListMessages( + 2, 5, maxSequence, version, manifest, blobInfos, messages, 200), + maxSequence, + 4); + verifyMessage( + version, + manifest, + blobInfos, + messages, + {{108, {6}}, {108, {7}}, {192, {10, 12}}}); + + // Set a limit so that all the VLs are sent individually + messages.clear(); + verifyBuildMessages( + ValidatorList::buildValidatorListMessages( + 2, 5, maxSequence, version, manifest, blobInfos, messages, 150), + maxSequence, + 4); + verifyMessage( + version, + manifest, + blobInfos, + messages, + {{108, {6}}, {108, {7}}, {110, {10}}, {110, {12}}}); + + // Set a limit smaller than some of the messages. 
Because single + // messages send regardless, they will all still be sent + messages.clear(); + verifyBuildMessages( + ValidatorList::buildValidatorListMessages( + 2, 5, maxSequence, version, manifest, blobInfos, messages, 108), + maxSequence, + 4); + verifyMessage( + version, + manifest, + blobInfos, + messages, + {{108, {6}}, {108, {7}}, {110, {10}}, {110, {12}}}); + } + public: void run() override { testGenesisQuorum(); testConfigLoad(); - testApplyList(); + testApplyLists(); + testGetAvailable(); testUpdateTrusted(); testExpires(); testNegativeUNL(); + testSha512Hash(); + testBuildMessages(); } -}; +}; // namespace test BEAST_DEFINE_TESTSUITE(ValidatorList, app, ripple); diff --git a/src/test/app/ValidatorSite_test.cpp b/src/test/app/ValidatorSite_test.cpp index 9fc80c5a1..3c7bb9c9e 100644 --- a/src/test/app/ValidatorSite_test.cpp +++ b/src/test/app/ValidatorSite_test.cpp @@ -53,6 +53,7 @@ realValidatorContents() } auto constexpr default_expires = std::chrono::seconds{3600}; +auto constexpr default_effective_overlap = std::chrono::seconds{30}; } // namespace detail class ValidatorSite_test : public beast::unit_test::suite @@ -135,6 +136,8 @@ private: bool failApply = false; int serverVersion = 1; std::chrono::seconds expiresFromNow = detail::default_expires; + std::chrono::seconds effectiveOverlap = + detail::default_effective_overlap; int expectedRefreshMin = 0; }; void @@ -146,13 +149,17 @@ private: boost::adaptors::transformed( [](FetchListConfig const& cfg) { return cfg.path + - (cfg.ssl ? " [https]" : " [http]"); + (cfg.ssl ? 
" [https] v" : " [http] v") + + std::to_string(cfg.serverVersion) + + " " + cfg.msg; }), ", "); using namespace jtx; + using namespace std::chrono_literals; Env env(*this); auto& trustedKeys = env.app().validators(); + env.timeKeeper().set(env.timeKeeper().now() + 30s); test::StreamSink sink; beast::Journal journal{sink}; @@ -184,10 +191,17 @@ private: while (item.list.size() < listSize) item.list.push_back(TrustedPublisherServer::randomValidator()); + NetClock::time_point const expires = + env.timeKeeper().now() + cfg.expiresFromNow; + NetClock::time_point const effective2 = + expires - cfg.effectiveOverlap; + NetClock::time_point const expires2 = + effective2 + cfg.expiresFromNow; item.server = make_TrustedPublisherServer( env.app().getIOService(), item.list, - env.timeKeeper().now() + cfg.expiresFromNow, + expires, + {{effective2, expires2}}, cfg.ssl, cfg.serverVersion); cfgPublishers.push_back(strHex(item.server->publisherPublic())); @@ -201,7 +215,6 @@ private: BEAST_EXPECT( trustedKeys.load(emptyLocalKey, emptyCfgKeys, cfgPublishers)); - using namespace std::chrono_literals; // Normally, tests will only need a fraction of this time, // but sometimes DNS resolution takes an inordinate amount // of time, so the test will just wait. 
@@ -381,8 +394,15 @@ public: { // fetch single site testFetchList({{"/validators", "", ssl}}); + testFetchList({{"/validators2", "", ssl}}); // fetch multiple sites testFetchList({{"/validators", "", ssl}, {"/validators", "", ssl}}); + testFetchList( + {{"/validators", "", ssl}, {"/validators2", "", ssl}}); + testFetchList( + {{"/validators2", "", ssl}, {"/validators", "", ssl}}); + testFetchList( + {{"/validators2", "", ssl}, {"/validators2", "", ssl}}); // fetch single site with single redirects testFetchList({{"/redirect_once/301", "", ssl}}); testFetchList({{"/redirect_once/302", "", ssl}}); @@ -391,6 +411,19 @@ public: // one redirect, one not testFetchList( {{"/validators", "", ssl}, {"/redirect_once/302", "", ssl}}); + testFetchList( + {{"/validators2", "", ssl}, {"/redirect_once/302", "", ssl}}); + // UNLs with a "gap" between validUntil of one and validFrom of the + // next + testFetchList( + {{"/validators2", + "", + ssl, + false, + false, + 1, + detail::default_expires, + std::chrono::seconds{-90}}}); // fetch single site with undending redirect (fails to load) testFetchList( {{"/redirect_forever/301", @@ -418,6 +451,14 @@ public: ssl, true, true}}); + // one undending redirect, one not + testFetchList( + {{"/validators2", "", ssl}, + {"/redirect_forever/302", + "Exceeded max redirects", + ssl, + true, + true}}); // invalid redir Location testFetchList( {{"/redirect_to/ftp://invalid-url/302", @@ -438,6 +479,12 @@ public: ssl, true, true}}); + testFetchList( + {{"/validators2/bad", + "Unable to parse JSON response", + ssl, + true, + true}}); // error status returned testFetchList( {{"/bad-resource", "returned bad status", ssl, true, true}}); @@ -455,30 +502,96 @@ public: ssl, true, true}}); + testFetchList( + {{"/validators2/missing", + "Missing fields in JSON response", + ssl, + true, + true}}); // timeout testFetchList({{"/sleep/13", "took too long", ssl, true, true}}); + // bad manifest format using known versions + // * Retrieves a v1 formatted list 
claiming version 2 + testFetchList( + {{"/validators", "Missing fields", ssl, true, true, 2}}); + // * Retrieves a v2 formatted list claiming version 1 + testFetchList( + {{"/validators2", "Missing fields", ssl, true, true, 0}}); // bad manifest version + // Because versions other than 1 are treated as v2, the v1 + // list won't have the blobs_v2 fields, and thus will claim to have + // missing fields testFetchList( - {{"/validators", "Unsupported version", ssl, false, true, 4}}); - using namespace std::chrono_literals; - // get old validator list + {{"/validators", "Missing fields", ssl, true, true, 4}}); testFetchList( - {{"/validators", - "Stale validator list", + {{"/validators2", + "1 unsupported version", ssl, false, true, - 1, - 0s}}); - // force an out-of-range expiration value + 4}}); + using namespace std::chrono_literals; + // get expired validator list testFetchList( {{"/validators", - "Invalid validator list", + "Applied 1 expired validator list(s)", + ssl, + false, + false, + 1, + 0s}}); + testFetchList( + {{"/validators2", + "Applied 1 expired validator list(s)", + ssl, + false, + false, + 1, + 0s, + -1s}}); + // force an out-of-range validUntil value + testFetchList( + {{"/validators", + "1 invalid validator list(s)", ssl, false, true, 1, std::chrono::seconds{Json::Value::maxInt + 1}}}); + // force an out-of-range validUntil value on the future list + // The first list is accepted. The second fails. The parser + // returns the "best" result, so this looks like a success. + testFetchList( + {{"/validators2", + "", + ssl, + false, + false, + 1, + std::chrono::seconds{Json::Value::maxInt - 300}, + 299s}}); + // force an out-of-range validFrom value + // The first list is accepted. The second fails. The parser + // returns the "best" result, so this looks like a success. 
+ testFetchList( + {{"/validators2", + "", + ssl, + false, + false, + 1, + std::chrono::seconds{Json::Value::maxInt - 300}, + 301s}}); + // force an out-of-range validUntil value on _both_ lists + testFetchList( + {{"/validators2", + "2 invalid validator list(s)", + ssl, + false, + true, + 1, + std::chrono::seconds{Json::Value::maxInt + 1}, + std::chrono::seconds{Json::Value::maxInt - 6000}}}); // verify refresh intervals are properly clamped testFetchList( {{"/validators/refresh/0", @@ -488,6 +601,17 @@ public: false, 1, detail::default_expires, + detail::default_effective_overlap, + 1}}); // minimum of 1 minute + testFetchList( + {{"/validators2/refresh/0", + "", + ssl, + false, + false, + 1, + detail::default_expires, + detail::default_effective_overlap, 1}}); // minimum of 1 minute testFetchList( {{"/validators/refresh/10", @@ -497,6 +621,17 @@ public: false, 1, detail::default_expires, + detail::default_effective_overlap, + 10}}); // 10 minutes is fine + testFetchList( + {{"/validators2/refresh/10", + "", + ssl, + false, + false, + 1, + detail::default_expires, + detail::default_effective_overlap, 10}}); // 10 minutes is fine testFetchList( {{"/validators/refresh/2000", @@ -506,6 +641,17 @@ public: false, 1, detail::default_expires, + detail::default_effective_overlap, + 60 * 24}}); // max of 24 hours + testFetchList( + {{"/validators2/refresh/2000", + "", + ssl, + false, + false, + 1, + detail::default_expires, + detail::default_effective_overlap, 60 * 24}}); // max of 24 hours } testFileURLs(); diff --git a/src/test/consensus/NegativeUNL_test.cpp b/src/test/consensus/NegativeUNL_test.cpp index 7100600a5..6edbb5e24 100644 --- a/src/test/consensus/NegativeUNL_test.cpp +++ b/src/test/consensus/NegativeUNL_test.cpp @@ -1860,7 +1860,12 @@ class NegativeUNLVoteFilterValidations_test : public beast::unit_test::suite auto& local = *nUnlKeys.begin(); std::vector cfgPublishers; validators.load(local, cfgKeys, cfgPublishers); - 
validators.updateTrusted(activeValidators); + validators.updateTrusted( + activeValidators, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); BEAST_EXPECT(validators.getTrustedMasterKeys().size() == numNodes); validators.setNegativeUNL(nUnlKeys); BEAST_EXPECT(validators.getNegativeUNL().size() == negUnlSize); diff --git a/src/test/jtx/TrustedPublisherServer.h b/src/test/jtx/TrustedPublisherServer.h index ada2bb53e..c02196206 100644 --- a/src/test/jtx/TrustedPublisherServer.h +++ b/src/test/jtx/TrustedPublisherServer.h @@ -57,7 +57,12 @@ class TrustedPublisherServer socket_type sock_; endpoint_type ep_; boost::asio::ip::tcp::acceptor acceptor_; + // Generates a version 1 validator list, using the int parameter as the + // actual version. std::function getList_; + // Generates a version 2 validator list, using the int parameter as the + // actual version. + std::function getList2_; // The SSL context is required, and holds certificates bool useSSL_; @@ -91,6 +96,18 @@ class TrustedPublisherServer sslCtx_.use_tmp_dh(boost::asio::buffer(dh().data(), dh().size())); } + struct BlobInfo + { + BlobInfo(std::string b, std::string s) : blob(b), signature(s) + { + } + + // base-64 encoded JSON containing the validator list. + std::string blob; + // hex-encoded signature of the blob using the publisher's signing key + std::string signature; + }; + public: struct Validator { @@ -144,14 +161,19 @@ public: 1)}; } - // TrustedPublisherServer must be accessed through a shared_ptr + // TrustedPublisherServer must be accessed through a shared_ptr. // This constructor is only public so std::make_shared has access. // The function`make_TrustedPublisherServer` should be used to create // instances. 
+ // The `futures` member is expected to be structured as + // effective / expiration time point pairs for use in version 2 UNLs TrustedPublisherServer( boost::asio::io_context& ioc, std::vector const& validators, - NetClock::time_point expiration, + NetClock::time_point validUntil, + std::vector< + std::pair> const& + futures, bool useSSL = false, int version = 1, bool immediateStart = true, @@ -170,29 +192,80 @@ public: auto const manifest = makeManifestString( publisherPublic_, publisherSecret_, keys.first, keys.second, 1); - std::string data = "{\"sequence\":" + std::to_string(sequence) + - ",\"expiration\":" + - std::to_string(expiration.time_since_epoch().count()) + - ",\"validators\":["; + std::vector blobInfo; + blobInfo.reserve(futures.size() + 1); + auto const [data, blob] = [&]() -> std::pair { + // Builds the validator list, then encodes it into a blob. + std::string data = "{\"sequence\":" + std::to_string(sequence) + + ",\"expiration\":" + + std::to_string(validUntil.time_since_epoch().count()) + + ",\"validators\":["; - for (auto const& val : validators) - { - data += "{\"validation_public_key\":\"" + strHex(val.masterPublic) + - "\",\"manifest\":\"" + val.manifest + "\"},"; - } - data.pop_back(); - data += "]}"; - std::string blob = base64_encode(data); - auto const sig = sign(keys.first, keys.second, makeSlice(data)); - getList_ = [blob, sig, manifest, version](int interval) { + for (auto const& val : validators) + { + data += "{\"validation_public_key\":\"" + + strHex(val.masterPublic) + "\",\"manifest\":\"" + + val.manifest + "\"},"; + } + data.pop_back(); + data += "]}"; + std::string blob = base64_encode(data); + return std::make_pair(data, blob); + }(); + auto const sig = strHex(sign(keys.first, keys.second, makeSlice(data))); + blobInfo.emplace_back(blob, sig); + getList_ = [blob = blob, sig, manifest, version](int interval) { + // Build the contents of a version 1 format UNL file std::stringstream l; l << "{\"blob\":\"" << blob << "\"" - << 
",\"signature\":\"" << strHex(sig) << "\"" + << ",\"signature\":\"" << sig << "\"" << ",\"manifest\":\"" << manifest << "\"" << ",\"refresh_interval\": " << interval << ",\"version\":" << version << '}'; return l.str(); }; + for (auto const& future : futures) + { + std::string data = "{\"sequence\":" + std::to_string(++sequence) + + ",\"effective\":" + + std::to_string(future.first.time_since_epoch().count()) + + ",\"expiration\":" + + std::to_string(future.second.time_since_epoch().count()) + + ",\"validators\":["; + + // Use the same set of validators for simplicity + for (auto const& val : validators) + { + data += "{\"validation_public_key\":\"" + + strHex(val.masterPublic) + "\",\"manifest\":\"" + + val.manifest + "\"},"; + } + data.pop_back(); + data += "]}"; + std::string blob = base64_encode(data); + auto const sig = + strHex(sign(keys.first, keys.second, makeSlice(data))); + blobInfo.emplace_back(blob, sig); + } + getList2_ = [blobInfo, manifest, version](int interval) { + // Build the contents of a version 2 format UNL file + // Use `version + 1` to get 2 for most tests, but have + // a "bad" version number for tests that provide an override. 
+ std::stringstream l; + for (auto const& info : blobInfo) + { + l << "{\"blob\":\"" << info.blob << "\"" + << ",\"signature\":\"" << info.signature << "\"},"; + } + std::string blobs = l.str(); + blobs.pop_back(); + l.str(std::string()); + l << "{\"blobs_v2\": [ " << blobs << "],\"manifest\":\"" << manifest + << "\"" + << ",\"refresh_interval\": " << interval + << ",\"version\":" << (version + 1) << '}'; + return l.str(); + }; if (useSSL_) { @@ -505,7 +578,26 @@ private: res.keep_alive(req.keep_alive()); bool prepare = true; - if (boost::starts_with(path, "/validators")) + if (boost::starts_with(path, "/validators2")) + { + res.result(http::status::ok); + res.insert("Content-Type", "application/json"); + if (path == "/validators2/bad") + res.body() = "{ 'bad': \"2']"; + else if (path == "/validators2/missing") + res.body() = "{\"version\": 2}"; + else + { + int refresh = 5; + constexpr char const* refreshPrefix = + "/validators2/refresh/"; + if (boost::starts_with(path, refreshPrefix)) + refresh = boost::lexical_cast( + path.substr(strlen(refreshPrefix))); + res.body() = getList2_(refresh); + } + } + else if (boost::starts_with(path, "/validators")) { res.result(http::status::ok); res.insert("Content-Type", "application/json"); @@ -516,9 +608,11 @@ private: else { int refresh = 5; - if (boost::starts_with(path, "/validators/refresh")) + constexpr char const* refreshPrefix = + "/validators/refresh/"; + if (boost::starts_with(path, refreshPrefix)) refresh = boost::lexical_cast( - path.substr(20)); + path.substr(strlen(refreshPrefix))); res.body() = getList_(refresh); } } @@ -618,14 +712,16 @@ inline std::shared_ptr make_TrustedPublisherServer( boost::asio::io_context& ioc, std::vector const& validators, - NetClock::time_point expiration, + NetClock::time_point validUntil, + std::vector> const& + futures, bool useSSL = false, int version = 1, bool immediateStart = true, int sequence = 1) { auto const r = std::make_shared( - ioc, validators, expiration, useSSL, 
version, sequence); + ioc, validators, validUntil, futures, useSSL, version, sequence); if (immediateStart) r->start(); return r; diff --git a/src/test/net/DatabaseDownloader_test.cpp b/src/test/net/DatabaseDownloader_test.cpp index 7be70e004..c15f8e9b9 100644 --- a/src/test/net/DatabaseDownloader_test.cpp +++ b/src/test/net/DatabaseDownloader_test.cpp @@ -40,6 +40,8 @@ class DatabaseDownloader_test : public beast::unit_test::suite env.app().getIOService(), list, env.timeKeeper().now() + std::chrono::seconds{3600}, + // No future VLs + {}, ssl); } diff --git a/src/test/overlay/compression_test.cpp b/src/test/overlay/compression_test.cpp index 041659b37..a83aa09b1 100644 --- a/src/test/overlay/compression_test.cpp +++ b/src/test/overlay/compression_test.cpp @@ -344,11 +344,43 @@ public: return list; } + std::shared_ptr + buildValidatorListCollection() + { + auto list = std::make_shared(); + + auto master = randomKeyPair(KeyType::ed25519); + auto signing = randomKeyPair(KeyType::ed25519); + STObject st(sfGeneric); + st[sfSequence] = 0; + st[sfPublicKey] = std::get<0>(master); + st[sfSigningPubKey] = std::get<0>(signing); + st[sfDomain] = makeSlice(std::string("example.com")); + sign( + st, + HashPrefix::manifest, + KeyType::ed25519, + std::get<1>(master), + sfMasterSignature); + sign(st, HashPrefix::manifest, KeyType::ed25519, std::get<1>(signing)); + Serializer s; + st.add(s); + list->set_manifest(s.data(), s.size()); + list->set_version(4); + STObject signature(sfSignature); + ripple::sign( + st, HashPrefix::manifest, KeyType::ed25519, std::get<1>(signing)); + Serializer s1; + st.add(s1); + auto& blob = *list->add_blobs(); + blob.set_signature(s1.data(), s1.size()); + blob.set_blob(strHex(s.getString())); + return list; + } + void testProtocol() { - testcase("Message Compression"); - auto thresh = beast::severities::Severity::kInfo; auto logs = std::make_unique(thresh); @@ -359,6 +391,7 @@ public: protocol::TMLedgerData ledger_data; protocol::TMGetObjectByHash 
get_object; protocol::TMValidatorList validator_list; + protocol::TMValidatorListCollection validator_list_collection; // 4.5KB doTest(buildManifests(20), protocol::mtMANIFESTS, 4, "TMManifests20"); @@ -418,6 +451,11 @@ public: protocol::mtVALIDATORLIST, 4, "TMValidatorList"); + doTest( + buildValidatorListCollection(), + protocol::mtVALIDATORLISTCOLLECTION, + 4, + "TMValidatorListCollection"); } void diff --git a/src/test/rpc/ShardArchiveHandler_test.cpp b/src/test/rpc/ShardArchiveHandler_test.cpp index 98d18b2f3..d4452fc29 100644 --- a/src/test/rpc/ShardArchiveHandler_test.cpp +++ b/src/test/rpc/ShardArchiveHandler_test.cpp @@ -46,6 +46,8 @@ class ShardArchiveHandler_test : public beast::unit_test::suite env.app().getIOService(), list, env.timeKeeper().now() + std::chrono::seconds{3600}, + // No future VLs + {}, ssl); } diff --git a/src/test/rpc/ValidatorRPC_test.cpp b/src/test/rpc/ValidatorRPC_test.cpp index 54cdefa12..c9b11cdb9 100644 --- a/src/test/rpc/ValidatorRPC_test.cpp +++ b/src/test/rpc/ValidatorRPC_test.cpp @@ -189,12 +189,20 @@ public: // Manage single-thread io_service for server. 
BasicApp worker{1}; using namespace std::chrono_literals; - NetClock::time_point const expiration{3600s}; + NetClock::time_point const validUntil{3600s}; + NetClock::time_point const validFrom2{validUntil - 60s}; + NetClock::time_point const validUntil2{validFrom2 + 3600s}; auto server = make_TrustedPublisherServer( - worker.get_io_service(), validators, expiration, false, 1, false); + worker.get_io_service(), + validators, + validUntil, + {{validFrom2, validUntil2}}, + false, + 1, + false); //---------------------------------------------------------------------- - // Publisher list site unavailable + // Publisher list site unavailable v1 { // Publisher site information using namespace std::string_literals; @@ -261,11 +269,78 @@ public: } } } + // Publisher list site unavailable v2 + { + // Publisher site information + using namespace std::string_literals; + std::string siteURI = + "http://"s + getEnvLocalhostAddr() + ":1234/validators2"; + + Env env{ + *this, + envconfig([&](std::unique_ptr cfg) { + cfg->section(SECTION_VALIDATOR_LIST_SITES).append(siteURI); + cfg->section(SECTION_VALIDATOR_LIST_KEYS) + .append(strHex(server->publisherPublic())); + return cfg; + }), + }; + + env.app().validatorSites().start(); + env.app().validatorSites().join(); + + { + auto const jrr = env.rpc("server_info")[jss::result]; + BEAST_EXPECT( + jrr[jss::info][jss::validator_list][jss::expiration] == + "unknown"); + } + { + auto const jrr = env.rpc("server_state")[jss::result]; + BEAST_EXPECT( + jrr[jss::state][jss::validator_list_expires].asInt() == 0); + } + { + auto const jrr = env.rpc("validators")[jss::result]; + BEAST_EXPECT( + jrr[jss::validation_quorum].asUInt() == + std::numeric_limits::max()); + BEAST_EXPECT(jrr[jss::local_static_keys].size() == 0); + BEAST_EXPECT(jrr[jss::trusted_validator_keys].size() == 0); + BEAST_EXPECT( + jrr[jss::validator_list][jss::expiration] == "unknown"); + + if (BEAST_EXPECT(jrr[jss::publisher_lists].size() == 1)) + { + auto jp = 
jrr[jss::publisher_lists][0u]; + BEAST_EXPECT(jp[jss::available] == false); + BEAST_EXPECT(jp[jss::list].size() == 0); + BEAST_EXPECT(!jp.isMember(jss::seq)); + BEAST_EXPECT(!jp.isMember(jss::expiration)); + BEAST_EXPECT(!jp.isMember(jss::version)); + BEAST_EXPECT( + jp[jss::pubkey_publisher] == + strHex(server->publisherPublic())); + } + BEAST_EXPECT(jrr[jss::signing_keys].size() == 0); + } + { + auto const jrr = env.rpc("validator_list_sites")[jss::result]; + if (BEAST_EXPECT(jrr[jss::validator_sites].size() == 1)) + { + auto js = jrr[jss::validator_sites][0u]; + BEAST_EXPECT(js[jss::refresh_interval_min].asUInt() == 5); + BEAST_EXPECT(js[jss::uri] == siteURI); + BEAST_EXPECT(js.isMember(jss::last_refresh_time)); + BEAST_EXPECT(js[jss::last_refresh_status] == "invalid"); + } + } + } //---------------------------------------------------------------------- // Publisher list site available + server->start(); + // Publisher list site available v1 { - server->start(); - std::stringstream uri; uri << "http://" << server->local_endpoint() << "/validators"; auto siteURI = uri.str(); @@ -286,26 +361,31 @@ public: for (auto const& val : validators) startKeys.insert(calcNodeID(val.masterPublic)); - env.app().validators().updateTrusted(startKeys); + env.app().validators().updateTrusted( + startKeys, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); { auto const jrr = env.rpc("server_info")[jss::result]; BEAST_EXPECT( jrr[jss::info][jss::validator_list][jss::expiration] == - to_string(expiration)); + to_string(validUntil)); } { auto const jrr = env.rpc("server_state")[jss::result]; BEAST_EXPECT( jrr[jss::state][jss::validator_list_expires].asUInt() == - expiration.time_since_epoch().count()); + validUntil.time_since_epoch().count()); } { auto const jrr = env.rpc("validators")[jss::result]; BEAST_EXPECT(jrr[jss::validation_quorum].asUInt() == 2); BEAST_EXPECT( jrr[jss::validator_list][jss::expiration] == - 
to_string(expiration)); + to_string(validUntil)); BEAST_EXPECT(jrr[jss::local_static_keys].size() == 0); BEAST_EXPECT( @@ -334,7 +414,7 @@ public: BEAST_EXPECT( jp[jss::pubkey_publisher] == strHex(server->publisherPublic())); - BEAST_EXPECT(jp[jss::expiration] == to_string(expiration)); + BEAST_EXPECT(jp[jss::expiration] == to_string(validUntil)); BEAST_EXPECT(jp[jss::version] == 1); } auto jsk = jrr[jss::signing_keys]; @@ -361,6 +441,129 @@ public: } } } + // Publisher list site available v2 + { + std::stringstream uri; + uri << "http://" << server->local_endpoint() << "/validators2"; + auto siteURI = uri.str(); + + Env env{ + *this, + envconfig([&](std::unique_ptr cfg) { + cfg->section(SECTION_VALIDATOR_LIST_SITES).append(siteURI); + cfg->section(SECTION_VALIDATOR_LIST_KEYS) + .append(strHex(server->publisherPublic())); + return cfg; + }), + }; + + env.app().validatorSites().start(); + env.app().validatorSites().join(); + hash_set startKeys; + for (auto const& val : validators) + startKeys.insert(calcNodeID(val.masterPublic)); + + env.app().validators().updateTrusted( + startKeys, + env.timeKeeper().now(), + env.app().getOPs(), + env.app().overlay(), + env.app().getHashRouter()); + + { + auto const jrr = env.rpc("server_info")[jss::result]; + BEAST_EXPECT( + jrr[jss::info][jss::validator_list][jss::expiration] == + to_string(validUntil2)); + } + { + auto const jrr = env.rpc("server_state")[jss::result]; + BEAST_EXPECT( + jrr[jss::state][jss::validator_list_expires].asUInt() == + validUntil2.time_since_epoch().count()); + } + { + auto const jrr = env.rpc("validators")[jss::result]; + BEAST_EXPECT(jrr[jss::validation_quorum].asUInt() == 2); + BEAST_EXPECT( + jrr[jss::validator_list][jss::expiration] == + to_string(validUntil2)); + BEAST_EXPECT(jrr[jss::local_static_keys].size() == 0); + + BEAST_EXPECT( + jrr[jss::trusted_validator_keys].size() == + expectedKeys.size()); + for (auto const& jKey : jrr[jss::trusted_validator_keys]) + { + 
BEAST_EXPECT(expectedKeys.count(jKey.asString()) == 1); + } + + if (BEAST_EXPECT(jrr[jss::publisher_lists].size() == 1)) + { + auto jp = jrr[jss::publisher_lists][0u]; + BEAST_EXPECT(jp[jss::available] == true); + if (BEAST_EXPECT(jp[jss::list].size() == 2)) + { + // check entries + std::set foundKeys; + for (auto const& k : jp[jss::list]) + { + foundKeys.insert(k.asString()); + } + BEAST_EXPECT(foundKeys == expectedKeys); + } + BEAST_EXPECT(jp[jss::seq].asUInt() == 1); + BEAST_EXPECT( + jp[jss::pubkey_publisher] == + strHex(server->publisherPublic())); + BEAST_EXPECT(jp[jss::expiration] == to_string(validUntil)); + BEAST_EXPECT(jp[jss::version] == 2); + if (BEAST_EXPECT(jp.isMember(jss::remaining)) && + BEAST_EXPECT(jp[jss::remaining].isArray()) && + BEAST_EXPECT(jp[jss::remaining].size() == 1)) + { + auto const& r = jp[jss::remaining][0u]; + if (BEAST_EXPECT(r[jss::list].size() == 2)) + { + // check entries + std::set foundKeys; + for (auto const& k : r[jss::list]) + { + foundKeys.insert(k.asString()); + } + BEAST_EXPECT(foundKeys == expectedKeys); + } + BEAST_EXPECT(r[jss::seq].asUInt() == 2); + BEAST_EXPECT( + r[jss::effective] == to_string(validFrom2)); + BEAST_EXPECT( + r[jss::expiration] == to_string(validUntil2)); + } + } + auto jsk = jrr[jss::signing_keys]; + BEAST_EXPECT(jsk.size() == 2); + for (auto const& val : validators) + { + BEAST_EXPECT(jsk.isMember(toStr(val.masterPublic))); + BEAST_EXPECT( + jsk[toStr(val.masterPublic)] == + toStr(val.signingPublic)); + } + } + { + auto const jrr = env.rpc("validator_list_sites")[jss::result]; + if (BEAST_EXPECT(jrr[jss::validator_sites].size() == 1)) + { + auto js = jrr[jss::validator_sites][0u]; + BEAST_EXPECT(js[jss::refresh_interval_min].asUInt() == 5); + BEAST_EXPECT(js[jss::uri] == siteURI); + BEAST_EXPECT(js[jss::last_refresh_status] == "accepted"); + // The actual time of the update will vary run to run, so + // just verify the time is there + BEAST_EXPECT(js.isMember(jss::last_refresh_time)); + } + } + } 
} void