Proposed 2.0.0-rc2 (#4818)

* Support for the mold linker (#4807)

* Promote API version 2 to supported (#4803)

* Promote API version 2 to be supported

* Switch the command line to API version 1

* Fix LedgerRequestRPC test

* Remove obsolete tx_account method

This method is not implemented; the only parts removed are those related to command-line parsing.

* Fix RPCCall test

* Reduce diff size, small test improvements

* Minor fixes

* Support for the mold linker

* Fix TransactionEntry_test

* Fix AccountTx_test

---------

Co-authored-by: seelabs <scott.determan@yahoo.com>

* Update Linux smoketest distros (#4813)

* Fix 2.0 regression in tx method with binary output (#4812)

* Fix binary output from tx method

* Formatting fix

* Minor test improvement

* Minor test improvements

* Optimize calculation of close time to avoid impasse and minimize gratuitous proposal changes (#4760)

* Optimize the calculation of close time to avoid
impasse and minimize gratuitous proposal changes.

* git apply clang-format.patch

* Scott S review fixes. Also clang-format.

* Set version to 2.0.0-rc2

---------

Co-authored-by: manoj <mdoshi@ripple.com>
Co-authored-by: Scott Determan <scott.determan@yahoo.com>
Co-authored-by: Bronek Kozicki <brok@incorrekt.com>
Co-authored-by: Michael Legleux <legleux@users.noreply.github.com>
Co-authored-by: Mark Travis <mtrippled@users.noreply.github.com>
Author: manoj
Date: 2023-11-20 13:34:59 -08:00
Committed by: GitHub
Parent commit: cf4e9e5578
Commit: 4977a5d43c
17 changed files with 723 additions and 707 deletions

View File

@@ -131,7 +131,16 @@ else ()
>)
endif ()
if (use_gold AND is_gcc)
if (use_mold)
# use mold linker if available
execute_process (
COMMAND ${CMAKE_CXX_COMPILER} -fuse-ld=mold -Wl,--version
ERROR_QUIET OUTPUT_VARIABLE LD_VERSION)
if ("${LD_VERSION}" MATCHES "mold")
target_link_libraries (common INTERFACE -fuse-ld=mold)
endif ()
unset (LD_VERSION)
elseif (use_gold AND is_gcc)
# use gold linker if available
execute_process (
COMMAND ${CMAKE_CXX_COMPILER} -fuse-ld=gold -Wl,--version
@@ -163,9 +172,7 @@ if (use_gold AND is_gcc)
$<$<NOT:$<BOOL:${static}>>:-Wl,--disable-new-dtags>)
endif ()
unset (LD_VERSION)
endif ()
if (use_lld)
elseif (use_lld)
# use lld linker if available
execute_process (
COMMAND ${CMAKE_CXX_COMPILER} -fuse-ld=lld -Wl,--version
@@ -176,6 +183,7 @@ if (use_lld)
unset (LD_VERSION)
endif()
if (assert)
foreach (var_ CMAKE_C_FLAGS_RELEASE CMAKE_CXX_FLAGS_RELEASE)
STRING (REGEX REPLACE "[-/]DNDEBUG" "" ${var_} "${${var_}}")

View File

@@ -37,6 +37,7 @@ if (is_linux)
option (static "link protobuf, openssl, libc++, and boost statically" ON)
option (perf "Enables flags that assist with perf recording" OFF)
option (use_gold "enables detection of gold (binutils) linker" ON)
option (use_mold "enables detection of mold (binutils) linker" ON)
else ()
# we are not ready to allow shared-libs on windows because it would require
# export declarations. On macos it's more feasible, but static openssl
@@ -45,6 +46,7 @@ else ()
set (static ON CACHE BOOL "static link, linux only. ON for WIN/macos" FORCE)
set (perf OFF CACHE BOOL "perf flags, linux only" FORCE)
set (use_gold OFF CACHE BOOL "gold linker, linux only" FORCE)
set (use_mold OFF CACHE BOOL "mold linker, linux only" FORCE)
endif ()
if (is_clang)
option (use_lld "enables detection of lld linker" ON)

View File

@@ -193,12 +193,30 @@ rocky_8_smoketest:
name: artifactory.ops.ripple.com/rockylinux/rockylinux:8
<<: *run_local_smoketest
fedora_37_smoketest:
rocky_9_smoketest:
stage: smoketest
dependencies:
- rpm_build
image:
name: artifactory.ops.ripple.com/fedora:37
name: artifactory.ops.ripple.com/rockylinux/rockylinux:9
<<: *run_local_smoketest
alma_8_smoketest:
stage: smoketest
dependencies:
- rpm_build
image:
name: artifactory.ops.ripple.com/almalinux:8
<<: *run_local_smoketest
alma_9_smoketest:
stage: smoketest
dependencies:
- rpm_build
image:
name: artifactory.ops.ripple.com/almalinux:9
<<: *run_local_smoketest
fedora_38_smoketest:
@@ -209,6 +227,14 @@ fedora_38_smoketest:
name: artifactory.ops.ripple.com/fedora:38
<<: *run_local_smoketest
fedora_39_smoketest:
stage: smoketest
dependencies:
- rpm_build
image:
name: artifactory.ops.ripple.com/fedora:39
<<: *run_local_smoketest
ubuntu_18_smoketest:
stage: smoketest
dependencies:
@@ -249,6 +275,14 @@ debian_11_smoketest:
name: artifactory.ops.ripple.com/debian:11
<<: *run_local_smoketest
debian_12_smoketest:
stage: smoketest
dependencies:
- dpkg_build
image:
name: artifactory.ops.ripple.com/debian:12
<<: *run_local_smoketest
#########################################################################
## ##
## stage: verify_sig ##
@@ -345,7 +379,6 @@ centos_7_verify_repo_test:
<<: *only_primary
<<: *run_repo_smoketest
rocky_8_verify_repo_test:
stage: verify_from_test
variables:
@@ -357,12 +390,34 @@ rocky_8_verify_repo_test:
<<: *only_primary
<<: *run_repo_smoketest
fedora_37_verify_repo_test:
rocky_9_verify_repo_test:
stage: verify_from_test
variables:
RPM_REPO: "rippled-rpm-test-mirror"
image:
name: artifactory.ops.ripple.com/fedora:37
name: artifactory.ops.ripple.com/rockylinux/rockylinux:9
dependencies:
- rpm_sign
<<: *only_primary
<<: *run_repo_smoketest
almalinux_8_verify_repo_test:
stage: verify_from_test
variables:
RPM_REPO: "rippled-rpm-test-mirror"
image:
name: artifactory.ops.ripple.com/almalinux:8
dependencies:
- rpm_sign
<<: *only_primary
<<: *run_repo_smoketest
almalinux_9_verify_repo_test:
stage: verify_from_test
variables:
RPM_REPO: "rippled-rpm-test-mirror"
image:
name: artifactory.ops.ripple.com/almalinux:9
dependencies:
- rpm_sign
<<: *only_primary
@@ -379,6 +434,18 @@ fedora_38_verify_repo_test:
<<: *only_primary
<<: *run_repo_smoketest
fedora_39_verify_repo_test:
stage: verify_from_test
variables:
RPM_REPO: "rippled-rpm-test-mirror"
image:
name: artifactory.ops.ripple.com/fedora:39
dependencies:
- rpm_sign
<<: *only_primary
<<: *run_repo_smoketest
ubuntu_18_verify_repo_test:
stage: verify_from_test
variables:
@@ -439,6 +506,18 @@ debian_11_verify_repo_test:
<<: *only_primary
<<: *run_repo_smoketest
debian_12_verify_repo_test:
stage: verify_from_test
variables:
DISTRO: "bookworm"
DEB_REPO: "rippled-deb-test-mirror"
image:
name: artifactory.ops.ripple.com/debian:12
dependencies:
- dpkg_sign
<<: *only_primary
<<: *run_repo_smoketest
#########################################################################
## ##
## stage: wait_approval_prod ##
@@ -515,6 +594,39 @@ rocky_8_verify_repo_prod:
<<: *only_primary
<<: *run_repo_smoketest
rocky_9_verify_repo_prod:
stage: verify_from_prod
variables:
RPM_REPO: "rippled-rpm"
image:
name: artifactory.ops.ripple.com/rockylinux/rockylinux:9
dependencies:
- rpm_sign
<<: *only_primary
<<: *run_repo_smoketest
alma_8_verify_repo_prod:
stage: verify_from_prod
variables:
RPM_REPO: "rippled-rpm"
image:
name: artifactory.ops.ripple.com/almalinux:8
dependencies:
- rpm_sign
<<: *only_primary
<<: *run_repo_smoketest
alma_9_verify_repo_prod:
stage: verify_from_prod
variables:
RPM_REPO: "rippled-rpm"
image:
name: artifactory.ops.ripple.com/almalinux:9
dependencies:
- rpm_sign
<<: *only_primary
<<: *run_repo_smoketest
fedora_37_verify_repo_prod:
stage: verify_from_prod
variables:
@@ -597,6 +709,19 @@ debian_11_verify_repo_prod:
<<: *only_primary
<<: *run_repo_smoketest
debian_12_verify_repo_prod:
stage: verify_from_prod
variables:
DISTRO: "bookworm"
DEB_REPO: "rippled-deb"
image:
name: artifactory.ops.ripple.com/debian:12
dependencies:
- dpkg_sign
<<: *only_primary
<<: *run_repo_smoketest
#########################################################################
## ##
## stage: get_final_hashes ##

View File

@@ -16,7 +16,7 @@ case ${ID} in
ubuntu|debian)
pkgtype="dpkg"
;;
fedora|centos|rhel|scientific|rocky)
fedora|centos|rhel|scientific|rocky|almalinux)
pkgtype="rpm"
;;
*)
@@ -79,16 +79,8 @@ else
# yum --showduplicates list rippled
yum -y install ${rpm_version_release}
elif [ "${install_from}" = "local" ] ; then
# cached pkg install
pkgs=("yum-utils openssl-static zlib-static")
if [[ "$ID" =~ rocky|fedora ]]; then
if [[ "$ID" =~ "rocky" ]]; then
sed -i 's/enabled=0/enabled=1/g' /etc/yum.repos.d/Rocky-PowerTools.repo
fi
pkgs="${pkgs[@]/openssl-static}"
fi
yum install -y $pkgs
rm -f build/rpm/packages/rippled-debug*.rpm
rm -f build/rpm/packages/rippled-devel*.rpm
rm -f build/rpm/packages/*.src.rpm
rpm -i build/rpm/packages/*.rpm
else

View File

@@ -171,7 +171,8 @@ Transaction::getJson(JsonOptions options, bool binary) const
Json::Value ret(
mTransaction->getJson(options & ~JsonOptions::include_date, binary));
if (mLedgerIndex)
// NOTE Binary STTx::getJson output might not be a JSON object
if (ret.isObject() && mLedgerIndex)
{
if (!(options & JsonOptions::disable_API_prior_V2))
{
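
Why the new isObject() guard matters: when binary output is requested, the inner getJson call can return a hex string rather than a JSON object, and a string cannot carry ledger_index, date, or the other fields attached below it. A minimal sketch of the situation (hedged; this is not the actual change, and the blob value is made up):

Json::Value ret(
    mTransaction->getJson(options & ~JsonOptions::include_date, /*binary=*/true));
if (!ret.isObject())
{
    // Binary form: ret holds a serialized blob such as "1200002280...",
    // so there is nothing to decorate; return it unchanged.
    return ret;
}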

View File

@@ -1759,15 +1759,27 @@ Consensus<Adaptor>::updateOurPositions(bool const share)
else
{
int neededWeight;
// It's possible to be at a close time impasse (described below), so
// keep track of whether this round has taken a long time.
bool stuck = false;
if (convergePercent_ < parms.avMID_CONSENSUS_TIME)
{
neededWeight = parms.avINIT_CONSENSUS_PCT;
}
else if (convergePercent_ < parms.avLATE_CONSENSUS_TIME)
{
neededWeight = parms.avMID_CONSENSUS_PCT;
}
else if (convergePercent_ < parms.avSTUCK_CONSENSUS_TIME)
{
neededWeight = parms.avLATE_CONSENSUS_PCT;
}
else
{
neededWeight = parms.avSTUCK_CONSENSUS_PCT;
stuck = true;
}
int participants = currPeerPositions_.size();
if (mode_.get() == ConsensusMode::proposing)
@@ -1787,57 +1799,156 @@ Consensus<Adaptor>::updateOurPositions(bool const share)
<< " nw:" << neededWeight << " thrV:" << threshVote
<< " thrC:" << threshConsensus;
// An impasse is possible unless a validator pretends to change
// its close time vote. Imagine 5 validators. 3 have positions
// for close time t1, and 2 with t2. That's an impasse because
// 75% will never be met. However, if one of the validators voting
// for t2 switches to t1, then that will be 80% and sufficient
// to break the impasse. It's also OK for those agreeing
// with the 3 to pretend to vote for the one with 2, because
// that will never exceed the threshold of 75%, even with as
// few as 3 validators. The most it can achieve is 2/3.
for (auto& [t, v] : closeTimeVotes)
// Choose a close time and decide whether there is consensus for it.
//
// Close time is chosen based on the threshVote threshold
// calculated above. If a close time has votes equal to or greater than
// that threshold, then that is the best close time. If multiple
// close times have an equal number of votes, then choose the greatest
// of them. Ensure that our close time then matches that which meets
// the criteria. But if no close time meets the criteria, make no
// changes.
//
// This is implemented slightly differently for validators vs
// non-validators. For non-validators, it is sufficient to merely
// count the close times from all peer positions to determine
// the best. Validators, however, risk putting the network into an
// impasse unless they are able to change their own position without
// first having counted it towards the close time totals.
//
// Here's how the impasse could occur:
// Imagine 5 validators. 3 have close time t1, and 2 have t2.
// As consensus time increases, the threshVote threshold also increases.
// Once threshVote exceeds 60%, no members of either set of validators
// will change their close times.
//
// Avoiding the impasse means that validators should identify
// whether they currently have the best close time. First, choose
// the close time with the most votes. However, if multiple close times
// have the same number of votes, pick the latest of them.
// If the validator does not currently have the best close time,
// switch to it and increase the local vote tally for that better
// close time. This will result in consensus in the next iteration
// assuming that the peer messages propagate successfully.
// In this case the validators in set t1 will remain the same but
// those in t2 switch to t1.
//
// Another wrinkle, however, is that too many position changes
// from validators also have a destabilizing effect. Consensus can
// take longer if peers have to keep re-calculating positions,
// and mistakes can be made if peers settle on the wrong position.
// Therefore, avoiding an impasse should also minimize the likelihood
// of gratuitous change of position.
//
// The solution for validators is to first track whether it's
// possible that the network is at an impasse based on how much
// time this current consensus round has taken. This is reflected
// in the "stuck" boolean. When stuck, validators perform the
// above-described position change based solely on whether or not
// they agree with the best position, and ignore the threshVote
// criteria used for the earlier part of the phase.
//
// Determining whether there is close time consensus is very simple
// in comparison: if votes for the best close time meet or exceed
// threshConsensus, then we have close time consensus. Otherwise, not.
// First, find the best close time with which to agree: the first criterion
// is the close time with the most votes. If a tie, the latest
// close time of those tied for the maximum number of votes.
std::multimap<int, NetClock::time_point> votesByCloseTime;
std::stringstream ss;
ss << "Close time calculation for ledger sequence "
<< static_cast<std::uint32_t>(previousLedger_.seq()) + 1
<< " Close times and vote count are as follows: ";
bool first = true;
for (auto const& [closeTime, voteCount] : closeTimeVotes)
{
if (adaptor_.validating() &&
t != asCloseTime(result_->position.closeTime()))
{
JLOG(j_.debug()) << "Others have voted for a close time "
"different than ours. Adding our vote "
"to this one in case it is necessary "
"to break an impasse.";
++v;
}
JLOG(j_.debug())
<< "CCTime: seq "
<< static_cast<std::uint32_t>(previousLedger_.seq()) + 1 << ": "
<< t.time_since_epoch().count() << " has " << v << ", "
<< threshVote << " required";
if (first)
first = false;
else
ss << ',';
votesByCloseTime.insert({voteCount, closeTime});
ss << closeTime.time_since_epoch().count() << ':' << voteCount;
}
// These always get populated because currPeerPositions_ is not
// empty if we end up here, so at least 1 close time has at least 1 vote.
assert(!currPeerPositions_.empty());
std::optional<int> maxVote;
std::set<NetClock::time_point> maxCloseTimes;
// Highest vote getter is last. Track each close time that is tied
// with the highest.
for (auto rit = votesByCloseTime.crbegin();
rit != votesByCloseTime.crend();
++rit)
{
int const voteCount = rit->first;
if (!maxVote.has_value())
maxVote = voteCount;
else if (voteCount < *maxVote)
break;
maxCloseTimes.insert(rit->second);
}
// The best close time is the latest close time of those that have
// the maximum number of votes.
NetClock::time_point const bestCloseTime = *maxCloseTimes.crbegin();
ss << ". The best close time has the most votes. If there is a tie, "
"choose the latest. This is "
<< bestCloseTime.time_since_epoch().count() << "with " << *maxVote
<< " votes. ";
if (v >= threshVote)
// If we are a validator potentially at an impasse and our own close
// time is not the best, change our close time to match it and
// tally another vote for it.
if (adaptor_.validating() && stuck &&
consensusCloseTime != bestCloseTime)
{
consensusCloseTime = bestCloseTime;
++*maxVote;
ss << " We are a validator. Consensus has taken "
<< result_->roundTime.read().count()
<< "ms. Previous round "
"took "
<< prevRoundTime_.count()
<< "ms. Now changing our "
"close time to "
<< bestCloseTime.time_since_epoch().count()
<< " that "
"now has "
<< *maxVote << " votes.";
}
// If the close time with the most votes also meets or exceeds the
// threshold to change our position, then change our position.
// Then check if we have met or exceeded the threshold for close
// time consensus.
//
// The 2nd check has been nested within the first historically.
// It's possible that this can be optimized by doing the
// 2nd check independently--this may make consensus happen faster in
// some cases. Then again, the trade-offs have not been modelled.
if (*maxVote >= threshVote)
{
consensusCloseTime = bestCloseTime;
ss << "Close time " << bestCloseTime.time_since_epoch().count()
<< " has " << *maxVote << " votes, which is >= the threshold ("
<< threshVote
<< " to make that our position if it isn't already.";
if (*maxVote >= threshConsensus)
{
// A close time has enough votes for us to try to agree
consensusCloseTime = t;
threshVote = v;
if (threshVote >= threshConsensus)
{
haveCloseTimeConsensus_ = true;
// Make sure that the winning close time is the one
// that propagates to the rest of the function.
break;
}
haveCloseTimeConsensus_ = true;
ss << " The maximum votes also >= the threshold ("
<< threshConsensus << ") for consensus.";
}
}
if (!haveCloseTimeConsensus_)
{
JLOG(j_.debug())
<< "No CT consensus:"
<< " Proposers:" << currPeerPositions_.size()
<< " Mode:" << to_string(mode_.get())
<< " Thresh:" << threshConsensus
<< " Pos:" << consensusCloseTime.time_since_epoch().count();
ss << " No CT consensus:"
<< " Proposers:" << currPeerPositions_.size()
<< " Mode:" << to_string(mode_.get())
<< " Thresh:" << threshConsensus
<< " Pos:" << consensusCloseTime.time_since_epoch().count();
}
JLOG(j_.debug()) << ss.str();
}
if (!ourNewSet &&
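
The impasse scenario described in the comments above can be checked with a quick worked example. This is a hedged, standalone illustration with made-up values (not rippled's actual helpers or exact rounding):

#include <cassert>

int main()
{
    int const participants = 5;
    int votesT1 = 3;                // validators proposing close time t1
    int const votesT2 = 2;          // validators proposing close time t2
    int const threshConsensus = 4;  // 75% of 5 participants, rounded up

    // Neither 3/5 (60%) nor 2/5 (40%) reaches 75%, and once the per-round
    // vote threshold passes 60% nobody switches voluntarily: an impasse.
    assert(votesT1 < threshConsensus && votesT2 < threshConsensus);

    // In the "stuck" phase, a t2 validator adopts the best close time
    // (t1, the one with the most votes) and counts its own vote for it.
    ++votesT1;                      // now 4 of 5, i.e. 80%
    assert(votesT1 >= threshConsensus);  // impasse broken
    return 0;
}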

View File

@@ -91,6 +91,7 @@ createHTTPPost(
class RPCParser
{
private:
unsigned const apiVersion_;
beast::Journal const j_;
// TODO New routine for parsing ledger parameters, other routines should
@@ -321,8 +322,7 @@ private:
if (uLedgerMax != -1 && uLedgerMax < uLedgerMin)
{
// The command line always follows apiMaximumSupportedVersion
if (RPC::apiMaximumSupportedVersion == 1)
if (apiVersion_ == 1)
return rpcError(rpcLGR_IDXS_INVALID);
return rpcError(rpcNOT_SYNCED);
}
@@ -340,76 +340,6 @@ private:
return jvRequest;
}
// tx_account accountID [ledger_min [ledger_max [limit]]]] [binary] [count]
// [forward]
Json::Value
parseTxAccount(Json::Value const& jvParams)
{
Json::Value jvRequest(Json::objectValue);
unsigned int iParams = jvParams.size();
auto const account = parseBase58<AccountID>(jvParams[0u].asString());
if (!account)
return rpcError(rpcACT_MALFORMED);
jvRequest[jss::account] = toBase58(*account);
bool bDone = false;
while (!bDone && iParams >= 2)
{
if (jvParams[iParams - 1].asString() == jss::binary)
{
jvRequest[jss::binary] = true;
--iParams;
}
else if (jvParams[iParams - 1].asString() == jss::count)
{
jvRequest[jss::count] = true;
--iParams;
}
else if (jvParams[iParams - 1].asString() == jss::forward)
{
jvRequest[jss::forward] = true;
--iParams;
}
else
{
bDone = true;
}
}
if (1 == iParams)
{
}
else if (2 == iParams)
{
if (!jvParseLedger(jvRequest, jvParams[1u].asString()))
return jvRequest;
}
else
{
std::int64_t uLedgerMin = jvParams[1u].asInt();
std::int64_t uLedgerMax = jvParams[2u].asInt();
if (uLedgerMax != -1 && uLedgerMax < uLedgerMin)
{
// The command line always follows apiMaximumSupportedVersion
if (RPC::apiMaximumSupportedVersion == 1)
return rpcError(rpcLGR_IDXS_INVALID);
return rpcError(rpcNOT_SYNCED);
}
jvRequest[jss::ledger_index_min] = jvParams[1u].asInt();
jvRequest[jss::ledger_index_max] = jvParams[2u].asInt();
if (iParams >= 4)
jvRequest[jss::limit] = jvParams[3u].asInt();
}
return jvRequest;
}
// book_offers <taker_pays> <taker_gets> [<taker> [<ledger> [<limit>
// [<proof> [<marker>]]]]] limit: 0 = no limit proof: 0 or 1
//
@@ -1221,7 +1151,8 @@ private:
public:
//--------------------------------------------------------------------------
explicit RPCParser(beast::Journal j) : j_(j)
explicit RPCParser(unsigned apiVersion, beast::Journal j)
: apiVersion_(apiVersion), j_(j)
{
}
@@ -1317,7 +1248,6 @@ public:
{"submit_multisigned", &RPCParser::parseSubmitMultiSigned, 1, 1},
{"transaction_entry", &RPCParser::parseTransactionEntry, 2, 2},
{"tx", &RPCParser::parseTx, 1, 4},
{"tx_account", &RPCParser::parseTxAccount, 1, 7},
{"tx_history", &RPCParser::parseTxHistory, 1, 1},
{"unl_list", &RPCParser::parseAsIs, 0, 0},
{"validation_create", &RPCParser::parseValidationCreate, 0, 1},
@@ -1481,7 +1411,7 @@ rpcCmdToJson(
{
Json::Value jvRequest(Json::objectValue);
RPCParser rpParser(j);
RPCParser rpParser(apiVersion, j);
Json::Value jvRpcParams(Json::arrayValue);
for (int i = 1; i != args.size(); i++)
@@ -1673,7 +1603,7 @@ fromCommandLine(
Logs& logs)
{
auto const result =
rpcClient(vCmd, config, logs, RPC::apiMaximumSupportedVersion);
rpcClient(vCmd, config, logs, RPC::apiCommandLineVersion);
std::cout << result.second.toStyledString();
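
The practical effect of storing apiVersion_ in RPCParser is that version-dependent behavior now follows the caller's requested version instead of the compile-time apiMaximumSupportedVersion, while fromCommandLine pins the CLI to apiCommandLineVersion. A hedged sketch of the pattern (the standalone helper is illustrative only; rippled performs this check inline as shown above):

// Illustrative helper, not part of the diff:
Json::Value
ledgerRangeError(unsigned apiVersion)
{
    // API v1 keeps the legacy error; v2 and later report rpcNOT_SYNCED.
    if (apiVersion == 1)
        return rpcError(rpcLGR_IDXS_INVALID);
    return rpcError(rpcNOT_SYNCED);
}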

View File

@@ -33,7 +33,7 @@ namespace BuildInfo {
// and follow the format described at http://semver.org/
//------------------------------------------------------------------------------
// clang-format off
char const* const versionString = "2.0.0-rc1"
char const* const versionString = "2.0.0-rc2"
// clang-format on
#if defined(DEBUG) || defined(SANITIZER)

View File

@@ -234,8 +234,9 @@ extern beast::SemanticVersion const lastVersion;
constexpr unsigned int apiInvalidVersion = 0;
constexpr unsigned int apiVersionIfUnspecified = 1;
constexpr unsigned int apiMinimumSupportedVersion = 1;
constexpr unsigned int apiMaximumSupportedVersion = 1;
constexpr unsigned int apiBetaVersion = 2;
constexpr unsigned int apiMaximumSupportedVersion = 2;
constexpr unsigned int apiCommandLineVersion = 1; // TODO Bump to 2 later
constexpr unsigned int apiBetaVersion = 3;
constexpr unsigned int apiMaximumValidVersion = apiBetaVersion;
static_assert(apiMinimumSupportedVersion >= apiVersionIfUnspecified);
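
Given the values above (minimum 1, command line 1, maximum supported 2, beta 3), the intended ordering of the constants can be spelled out at compile time. A hedged sanity sketch, assuming the constants are visible in namespace ripple::RPC:

static_assert(RPC::apiMinimumSupportedVersion <= RPC::apiCommandLineVersion);
static_assert(RPC::apiCommandLineVersion <= RPC::apiMaximumSupportedVersion);
static_assert(RPC::apiMaximumSupportedVersion < RPC::apiBetaVersion);
static_assert(RPC::apiMaximumValidVersion == RPC::apiBetaVersion);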

View File

@@ -279,6 +279,17 @@ public:
The command is examined and used to build
the correct JSON as per the arguments.
*/
template <class... Args>
Json::Value
rpc(unsigned apiVersion,
std::unordered_map<std::string, std::string> const& headers,
std::string const& cmd,
Args&&... args);
template <class... Args>
Json::Value
rpc(unsigned apiVersion, std::string const& cmd, Args&&... args);
template <class... Args>
Json::Value
rpc(std::unordered_map<std::string, std::string> const& headers,
@@ -655,6 +666,7 @@ protected:
Json::Value
do_rpc(
unsigned apiVersion,
std::vector<std::string> const& args,
std::unordered_map<std::string, std::string> const& headers = {});
@@ -698,12 +710,39 @@ protected:
template <class... Args>
Json::Value
Env::rpc(
unsigned apiVersion,
std::unordered_map<std::string, std::string> const& headers,
std::string const& cmd,
Args&&... args)
{
return do_rpc(
std::vector<std::string>{cmd, std::forward<Args>(args)...}, headers);
apiVersion,
std::vector<std::string>{cmd, std::forward<Args>(args)...},
headers);
}
template <class... Args>
Json::Value
Env::rpc(unsigned apiVersion, std::string const& cmd, Args&&... args)
{
return rpc(
apiVersion,
std::unordered_map<std::string, std::string>(),
cmd,
std::forward<Args>(args)...);
}
template <class... Args>
Json::Value
Env::rpc(
std::unordered_map<std::string, std::string> const& headers,
std::string const& cmd,
Args&&... args)
{
return do_rpc(
RPC::apiCommandLineVersion,
std::vector<std::string>{cmd, std::forward<Args>(args)...},
headers);
}
template <class... Args>
@@ -743,7 +782,7 @@ void
forAllApiVersions(VersionedTestCallable auto... testCallable)
{
for (auto testVersion = RPC::apiMinimumSupportedVersion;
testVersion <= RPC::apiBetaVersion;
testVersion <= RPC::apiMaximumValidVersion;
++testVersion)
{
(..., testCallable(testVersion));
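
Typical test usage of the new overloads, as a hedged sketch (assumes a jtx::Env named env; the commands shown are only examples):

test::jtx::forAllApiVersions([&](unsigned apiVersion) {
    // Explicit-version overload: the request is built and routed for
    // apiVersion rather than the compile-time maximum.
    auto const jr = env.rpc(apiVersion, "ledger_current");
    (void)jr;  // assertions elided in this sketch
});

// The versionless overloads keep defaulting to RPC::apiCommandLineVersion,
// mirroring how rippled's own command line behaves.
auto const cli = env.rpc("server_info");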

View File

@@ -460,15 +460,11 @@ Env::st(JTx const& jt)
Json::Value
Env::do_rpc(
unsigned apiVersion,
std::vector<std::string> const& args,
std::unordered_map<std::string, std::string> const& headers)
{
return rpcClient(
args,
app().config(),
app().logs(),
RPC::apiMaximumSupportedVersion,
headers)
return rpcClient(args, app().config(), app().logs(), apiVersion, headers)
.second;
}

View File

@@ -67,7 +67,7 @@ Json::Value
cmdToJSONRPC(
std::vector<std::string> const& args,
beast::Journal j,
unsigned int apiVersion = RPC::apiMaximumSupportedVersion);
unsigned int apiVersion);
} // namespace jtx
} // namespace test
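
Dropping the defaulted argument means test callers must now state the API version explicitly. A hedged usage sketch (the argument values and env.journal are assumptions, not taken from the diff):

// Before this change, cmdToJSONRPC(args, j) silently used
// apiMaximumSupportedVersion; now the version is part of the call site.
auto const jv =
    jtx::cmdToJSONRPC({"tx", txHash, "binary"}, env.journal, apiVersion);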

View File

@@ -137,6 +137,7 @@ class AccountTx_test : public beast::unit_test::suite
j[jss::result][jss::transactions][1u][jss::tx]
[jss::DeliverMax]);
case 2:
case 3:
if (j.isMember(jss::result) &&
(j[jss::result][jss::status] == "success") &&
(j[jss::result][jss::transactions].size() == 2) &&
@@ -198,20 +199,22 @@ class AccountTx_test : public beast::unit_test::suite
rpcACT_MALFORMED));
jParms[jss::account] = A1.human();
BEAST_EXPECT(hasTxs(env.rpc("json", "account_tx", to_string(jParms))));
BEAST_EXPECT(hasTxs(
env.rpc(apiVersion, "json", "account_tx", to_string(jParms))));
// Ledger min/max index
{
Json::Value p{jParms};
p[jss::ledger_index_min] = -1;
p[jss::ledger_index_max] = -1;
BEAST_EXPECT(hasTxs(env.rpc("json", "account_tx", to_string(p))));
BEAST_EXPECT(hasTxs(
env.rpc(apiVersion, "json", "account_tx", to_string(p))));
p[jss::ledger_index_min] = 0;
p[jss::ledger_index_max] = 100;
if (apiVersion < 2u)
BEAST_EXPECT(
hasTxs(env.rpc("json", "account_tx", to_string(p))));
BEAST_EXPECT(hasTxs(
env.rpc(apiVersion, "json", "account_tx", to_string(p))));
else
BEAST_EXPECT(isErr(
env.rpc("json", "account_tx", to_string(p)),
@@ -238,12 +241,13 @@ class AccountTx_test : public beast::unit_test::suite
{
Json::Value p{jParms};
p[jss::ledger_index_min] = -1;
BEAST_EXPECT(hasTxs(env.rpc("json", "account_tx", to_string(p))));
BEAST_EXPECT(hasTxs(
env.rpc(apiVersion, "json", "account_tx", to_string(p))));
p[jss::ledger_index_min] = 1;
if (apiVersion < 2u)
BEAST_EXPECT(
hasTxs(env.rpc("json", "account_tx", to_string(p))));
BEAST_EXPECT(hasTxs(
env.rpc(apiVersion, "json", "account_tx", to_string(p))));
else
BEAST_EXPECT(isErr(
env.rpc("json", "account_tx", to_string(p)),
@@ -260,22 +264,25 @@ class AccountTx_test : public beast::unit_test::suite
{
Json::Value p{jParms};
p[jss::ledger_index_max] = -1;
BEAST_EXPECT(hasTxs(env.rpc("json", "account_tx", to_string(p))));
BEAST_EXPECT(hasTxs(
env.rpc(apiVersion, "json", "account_tx", to_string(p))));
p[jss::ledger_index_max] = env.current()->info().seq;
if (apiVersion < 2u)
BEAST_EXPECT(
hasTxs(env.rpc("json", "account_tx", to_string(p))));
BEAST_EXPECT(hasTxs(
env.rpc(apiVersion, "json", "account_tx", to_string(p))));
else
BEAST_EXPECT(isErr(
env.rpc("json", "account_tx", to_string(p)),
rpcLGR_IDX_MALFORMED));
p[jss::ledger_index_max] = 3;
BEAST_EXPECT(hasTxs(env.rpc("json", "account_tx", to_string(p))));
BEAST_EXPECT(hasTxs(
env.rpc(apiVersion, "json", "account_tx", to_string(p))));
p[jss::ledger_index_max] = env.closed()->info().seq;
BEAST_EXPECT(hasTxs(env.rpc("json", "account_tx", to_string(p))));
BEAST_EXPECT(hasTxs(
env.rpc(apiVersion, "json", "account_tx", to_string(p))));
p[jss::ledger_index_max] = env.closed()->info().seq - 1;
BEAST_EXPECT(noTxs(env.rpc("json", "account_tx", to_string(p))));
@@ -286,7 +293,8 @@ class AccountTx_test : public beast::unit_test::suite
Json::Value p{jParms};
p[jss::ledger_index] = env.closed()->info().seq;
BEAST_EXPECT(hasTxs(env.rpc("json", "account_tx", to_string(p))));
BEAST_EXPECT(hasTxs(
env.rpc(apiVersion, "json", "account_tx", to_string(p))));
p[jss::ledger_index] = env.closed()->info().seq - 1;
BEAST_EXPECT(noTxs(env.rpc("json", "account_tx", to_string(p))));
@@ -306,7 +314,8 @@ class AccountTx_test : public beast::unit_test::suite
Json::Value p{jParms};
p[jss::ledger_hash] = to_string(env.closed()->info().hash);
BEAST_EXPECT(hasTxs(env.rpc("json", "account_tx", to_string(p))));
BEAST_EXPECT(hasTxs(
env.rpc(apiVersion, "json", "account_tx", to_string(p))));
p[jss::ledger_hash] = to_string(env.closed()->info().parentHash);
BEAST_EXPECT(noTxs(env.rpc("json", "account_tx", to_string(p))));
@@ -324,8 +333,8 @@ class AccountTx_test : public beast::unit_test::suite
p[jss::ledger_index] = -1;
if (apiVersion < 2u)
BEAST_EXPECT(
hasTxs(env.rpc("json", "account_tx", to_string(p))));
BEAST_EXPECT(hasTxs(
env.rpc(apiVersion, "json", "account_tx", to_string(p))));
else
BEAST_EXPECT(isErr(
env.rpc("json", "account_tx", to_string(p)),
@@ -337,8 +346,8 @@ class AccountTx_test : public beast::unit_test::suite
Json::Value p{jParms};
p[jss::ledger_index_max] = env.current()->info().seq;
if (apiVersion < 2u)
BEAST_EXPECT(
hasTxs(env.rpc("json", "account_tx", to_string(p))));
BEAST_EXPECT(hasTxs(
env.rpc(apiVersion, "json", "account_tx", to_string(p))));
else
BEAST_EXPECT(isErr(
env.rpc("json", "account_tx", to_string(p)),

View File

@@ -24,6 +24,8 @@
#include <ripple/rpc/impl/RPCHelpers.h>
#include <test/jtx.h>
#include <functional>
namespace ripple {
namespace RPC {
@@ -263,7 +265,7 @@ public:
}
void
testBadInput()
testBadInput(unsigned apiVersion)
{
using namespace test::jtx;
Env env{*this};
@@ -287,9 +289,9 @@ public:
// the purpose in this test is to force the ledger expiration/out of
// date check to trigger
env.timeKeeper().adjustCloseTime(weeks{3});
result = env.rpc("ledger_request", "1")[jss::result];
result = env.rpc(apiVersion, "ledger_request", "1")[jss::result];
BEAST_EXPECT(result[jss::status] == "error");
if (RPC::apiMaximumSupportedVersion == 1)
if (apiVersion == 1)
{
BEAST_EXPECT(result[jss::error] == "noCurrent");
BEAST_EXPECT(
@@ -357,7 +359,8 @@ public:
{
testLedgerRequest();
testEvolution();
testBadInput();
test::jtx::forAllApiVersions(
std::bind_front(&LedgerRequestRPC_test::testBadInput, this));
testMoreThan256Closed();
testNonAdmin();
}

File diff suppressed because it is too large.

View File

@@ -230,22 +230,20 @@ class TransactionEntry_test : public beast::unit_test::suite
}
// Use the command line form with the index.
if (apiVersion == RPC::apiMaximumSupportedVersion)
{
Json::Value const clIndex{env.rpc(
"transaction_entry", txhash, std::to_string(index))};
BEAST_EXPECT(clIndex["result"] == resIndex);
}
Json::Value const clIndex{env.rpc(
apiVersion,
"transaction_entry",
txhash,
std::to_string(index))};
BEAST_EXPECT(clIndex["result"] == resIndex);
// Use the command line form with the ledger_hash.
if (apiVersion == RPC::apiMaximumSupportedVersion)
{
Json::Value const clHash{env.rpc(
"transaction_entry",
txhash,
resIndex[jss::ledger_hash].asString())};
BEAST_EXPECT(clHash["result"] == resIndex);
}
Json::Value const clHash{env.rpc(
apiVersion,
"transaction_entry",
txhash,
resIndex[jss::ledger_hash].asString())};
BEAST_EXPECT(clHash["result"] == resIndex);
};
Account A1{"A1"};

View File

@@ -21,11 +21,13 @@
#include <ripple/protocol/ErrorCodes.h>
#include <ripple/protocol/STBase.h>
#include <ripple/protocol/jss.h>
#include <ripple/protocol/serialize.h>
#include <ripple/rpc/CTID.h>
#include <optional>
#include <test/jtx.h>
#include <test/jtx/Env.h>
#include <test/jtx/envconfig.h>
#include <optional>
#include <tuple>
namespace ripple {
@@ -774,11 +776,82 @@ class Transaction_test : public beast::unit_test::suite
}
}
void
testBinaryRequest(unsigned apiVersion)
{
testcase(
"Test binary request API version " + std::to_string(apiVersion));
using namespace test::jtx;
using std::to_string;
Env env{*this};
Account const alice{"alice"};
Account const gw{"gw"};
auto const USD{gw["USD"]};
env.fund(XRP(1000000), alice, gw);
std::shared_ptr<STTx const> const txn = env.tx();
BEAST_EXPECT(
to_string(txn->getTransactionID()) ==
"3F8BDE5A5F82C4F4708E5E9255B713E303E6E1A371FD5C7A704AFD1387C23981");
env.close();
std::shared_ptr<STObject const> meta =
env.closed()->txRead(txn->getTransactionID()).second;
std::string const expected_tx_blob = serializeHex(*txn);
std::string const expected_meta_blob = serializeHex(*meta);
Json::Value const result = [&env, txn, apiVersion]() {
Json::Value params{Json::objectValue};
params[jss::transaction] = to_string(txn->getTransactionID());
params[jss::binary] = true;
params[jss::api_version] = apiVersion;
return env.client().invoke("tx", params);
}();
if (BEAST_EXPECT(result[jss::status] == "success"))
{
BEAST_EXPECT(result[jss::result][jss::status] == "success");
BEAST_EXPECT(result[jss::result][jss::validated] == true);
BEAST_EXPECT(
result[jss::result][jss::hash] ==
to_string(txn->getTransactionID()));
BEAST_EXPECT(result[jss::result][jss::ledger_index] == 3);
BEAST_EXPECT(result[jss::result][jss::ctid] == "C000000300030000");
if (apiVersion > 1)
{
BEAST_EXPECT(
result[jss::result][jss::tx_blob] == expected_tx_blob);
BEAST_EXPECT(
result[jss::result][jss::meta_blob] == expected_meta_blob);
BEAST_EXPECT(
result[jss::result][jss::ledger_hash] ==
"2D5150E5A5AA436736A732291E437ABF01BC9E206C2DF3C77C4F856915"
"7905AA");
BEAST_EXPECT(
result[jss::result][jss::close_time_iso] ==
"2000-01-01T00:00:10Z");
}
else
{
BEAST_EXPECT(result[jss::result][jss::tx] == expected_tx_blob);
BEAST_EXPECT(
result[jss::result][jss::meta] == expected_meta_blob);
BEAST_EXPECT(result[jss::result][jss::date] == 10);
}
}
}
public:
void
run() override
{
using namespace test::jtx;
test::jtx::forAllApiVersions(
std::bind_front(&Transaction_test::testBinaryRequest, this));
FeatureBitset const all{supported_amendments()};
testWithFeats(all);
}