Mirror of https://github.com/Xahau/xahaud.git (synced 2025-12-06 17:27:52 +00:00)
Implement node-to-shard RPC control
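This commit adds a node_to_shard administrative RPC that controls the copying of ledger data from the node store into the shard store. The command takes a single action parameter with one of three values: start, status, or stop. The tests below exercise it through the jtx environment; the following minimal sketch is editorial and not part of the commit — the wrapper name issueNodeToShard is invented for illustration, while the RPC name, the action field, and the response messages are taken directly from the tests:

    // Sketch: drive the node_to_shard RPC from a jtx test environment.
    // The wrapper name issueNodeToShard is hypothetical; the RPC call and
    // parameter names mirror NodeToShardRPC_test below.
    #include <ripple/json/json_value.h>
    #include <ripple/json/to_string.h>
    #include <ripple/protocol/jss.h>
    #include <test/jtx/Env.h>

    namespace ripple::test {

    // Send node_to_shard with the given action ("start", "status" or "stop")
    // and return the "result" object of the response.
    inline Json::Value
    issueNodeToShard(jtx::Env& env, std::string const& action)
    {
        Json::Value jvParams;
        jvParams[jss::action] = action;
        return env.rpc(
            "json", "node_to_shard", to_string(jvParams))[jss::result];
    }

    }  // namespace ripple::test

Per the tests, "start" answers with "Database import initiated...", "status" reports progress (including firstShardIndex, lastShardIndex and currentShardIndex) while the import runs and returns the error message "Database import not running" once it has finished or been halted, and "stop" answers with "Database import halt initiated...".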
src/test/nodestore/DatabaseShard_test.cpp

@@ -19,6 +19,7 @@
#include <ripple/app/ledger/LedgerMaster.h>
#include <ripple/app/ledger/LedgerToJson.h>
#include <ripple/app/misc/LoadFeeTrack.h>
#include <ripple/app/misc/SHAMapStore.h>
#include <ripple/app/rdb/backend/RelationalDBInterfaceSqlite.h>
#include <ripple/basics/Slice.h>

@@ -263,6 +264,10 @@ class DatabaseShard_test : public TestBase
    {
        using namespace test::jtx;

        // The local fee may go up, especially in the online delete tests
        while (env_.app().getFeeTrack().lowerLocalFee())
            std::this_thread::sleep_for(std::chrono::milliseconds(1));

        if (isNewAccounts(seq))
            env_.fund(XRP(iniAmount), accounts_[nAccounts_[seq] - 1]);

@@ -1249,14 +1254,22 @@ class DatabaseShard_test : public TestBase
        Database& ndb = env.app().getNodeStore();
        BEAST_EXPECT(db);

        auto& store = env.app().getSHAMapStore();

        // Allow online delete to delete the startup ledgers
        // so that it will take some time for the import to
        // catch up to the point of the next rotation
        store.setCanDelete(10);

        // Create some ledgers for the shard store to import
        auto const shardCount = 5;
        TestData data(seedValue, 4, shardCount);
        if (!BEAST_EXPECT(data.makeLedgers(env)))
            return;

        auto& store = env.app().getSHAMapStore();
        auto lastRotated = store.getLastRotated();
        store.rendezvous();
        auto const lastRotated = store.getLastRotated();
        BEAST_EXPECT(lastRotated >= 553 && lastRotated < 1103);

        // Start the import
        db->importDatabase(ndb);

@@ -1267,37 +1280,45 @@ class DatabaseShard_test : public TestBase
            std::this_thread::sleep_for(std::chrono::milliseconds(1));
        }

        // Enable online deletion now that the import has started
        // Enable unimpeded online deletion now that the import has started
        store.setCanDelete(std::numeric_limits<std::uint32_t>::max());

        auto pauseVerifier = std::thread([lastRotated, &store, db, this] {
            while (true)
            // The import should still be running when this thread starts
            BEAST_EXPECT(db->getDatabaseImportSequence());
            auto rotationProgress = lastRotated;
            while (auto const ledgerSeq = db->getDatabaseImportSequence())
            {
                // Make sure database rotations dont interfere
                // with the import

                if (store.getLastRotated() != lastRotated)
                auto const last = store.getLastRotated();
                if (last != rotationProgress)
                {
                    // A rotation occurred during shard import. Not
                    // necessarily an error

                    auto const ledgerSeq = db->getDatabaseImportSequence();
                    BEAST_EXPECT(!ledgerSeq || ledgerSeq >= lastRotated);

                    break;
                    BEAST_EXPECT(
                        !ledgerSeq || ledgerSeq >= rotationProgress);
                    rotationProgress = last;
                }
            }
        });

        auto join = [&pauseVerifier]() {
            if (pauseVerifier.joinable())
                pauseVerifier.join();
        };

        // Create more ledgers to trigger online deletion
        data = TestData(seedValue * 2);
        if (!BEAST_EXPECT(data.makeLedgers(env, shardCount)))
        {
            pauseVerifier.join();
            join();
            return;
        }

        pauseVerifier.join();
        join();
        BEAST_EXPECT(store.getLastRotated() != lastRotated);
    }

src/test/rpc/NodeToShardRPC_test.cpp (new file, 331 lines)
@@ -0,0 +1,331 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2021 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <ripple/beast/unit_test.h>
#include <ripple/beast/utility/temp_dir.h>
#include <ripple/core/ConfigSections.h>
#include <ripple/nodestore/DatabaseShard.h>
#include <ripple/protocol/jss.h>
#include <test/jtx/Env.h>

namespace ripple {
namespace test {

class NodeToShardRPC_test : public beast::unit_test::suite
{
    bool
    importCompleted(
        NodeStore::DatabaseShard* shardStore,
        std::uint8_t const numberOfShards,
        Json::Value const& result)
    {
        auto const info = shardStore->getShardInfo();

        // Assume completed if the import isn't running
        auto const completed =
            result[jss::error_message] == "Database import not running";

        if (completed)
        {
            BEAST_EXPECT(
                info->incomplete().size() + info->finalized().size() ==
                numberOfShards);
        }

        return completed;
    }

public:
    void
    testStart()
    {
        testcase("Start");

        beast::temp_dir tempDir;

        jtx::Env env = [&] {
            auto c = jtx::envconfig();
            auto& section = c->section(ConfigSection::shardDatabase());
            section.set("path", tempDir.path());
            section.set("max_historical_shards", "20");
            section.set("ledgers_per_shard", "256");
            section.set("earliest_seq", "257");
            auto& sectionNode = c->section(ConfigSection::nodeDatabase());
            sectionNode.set("earliest_seq", "257");
            sectionNode.set("ledgers_per_shard", "256");
            c->setupControl(true, true, true);

            return jtx::Env(*this, std::move(c));
        }();

        std::uint8_t const numberOfShards = 10;

        // Create some ledgers so that we can initiate a
        // shard store database import.
        for (int i = 0; i < env.app().getShardStore()->ledgersPerShard() *
                 (numberOfShards + 1);
             ++i)
        {
            env.close();
        }

        auto shardStore = env.app().getShardStore();
        if (!BEAST_EXPECT(shardStore))
            return;

        {
            // Initiate a shard store import via the RPC
            // interface.

            Json::Value jvParams;
            jvParams[jss::action] = "start";

            auto const result = env.rpc(
                "json", "node_to_shard", to_string(jvParams))[jss::result];

            BEAST_EXPECT(
                result[jss::message] == "Database import initiated...");
        }

        while (!shardStore->getDatabaseImportSequence())
        {
            // Wait until the import starts
            std::this_thread::sleep_for(std::chrono::milliseconds(1));
        }

        {
            // Verify that the import is in progress with
            // the node_to_shard status RPC command

            Json::Value jvParams;
            jvParams[jss::action] = "status";

            auto const result = env.rpc(
                "json", "node_to_shard", to_string(jvParams))[jss::result];

            BEAST_EXPECT(
                result[jss::status] == "success" ||
                importCompleted(shardStore, numberOfShards, result));

            std::chrono::seconds const maxWait{60};
            auto const start = std::chrono::system_clock::now();

            while (true)
            {
                // Verify that the status object accurately
                // reflects import progress.

                auto const completeShards =
                    shardStore->getShardInfo()->finalized();

                if (!completeShards.empty())
                {
                    auto const result = env.rpc(
                        "json",
                        "node_to_shard",
                        to_string(jvParams))[jss::result];

                    if (!importCompleted(shardStore, numberOfShards, result))
                    {
                        BEAST_EXPECT(result[jss::firstShardIndex] == 1);
                        BEAST_EXPECT(result[jss::lastShardIndex] == 10);
                    }
                }

                if (boost::icl::contains(completeShards, 1))
                {
                    auto const result = env.rpc(
                        "json",
                        "node_to_shard",
                        to_string(jvParams))[jss::result];

                    BEAST_EXPECT(
                        result[jss::currentShardIndex] >= 1 ||
                        importCompleted(shardStore, numberOfShards, result));

                    break;
                }

                if (std::this_thread::sleep_for(std::chrono::milliseconds{100});
                    std::chrono::system_clock::now() - start > maxWait)
                {
                    BEAST_EXPECTS(
                        false, "Import timeout: could just be a slow machine.");
                    break;
                }
            }

            // Wait for the import to complete
            while (!boost::icl::contains(
                shardStore->getShardInfo()->finalized(), 10))
            {
                if (std::this_thread::sleep_for(std::chrono::milliseconds{100});
                    std::chrono::system_clock::now() - start > maxWait)
                {
                    BEAST_EXPECT(
                        importCompleted(shardStore, numberOfShards, result));
                    break;
                }
            }
        }
    }

    void
    testStop()
    {
        testcase("Stop");

        beast::temp_dir tempDir;

        jtx::Env env = [&] {
            auto c = jtx::envconfig();
            auto& section = c->section(ConfigSection::shardDatabase());
            section.set("path", tempDir.path());
            section.set("max_historical_shards", "20");
            section.set("ledgers_per_shard", "256");
            section.set("earliest_seq", "257");
            auto& sectionNode = c->section(ConfigSection::nodeDatabase());
            sectionNode.set("earliest_seq", "257");
            sectionNode.set("ledgers_per_shard", "256");
            c->setupControl(true, true, true);

            return jtx::Env(*this, std::move(c));
        }();

        std::uint8_t const numberOfShards = 10;

        // Create some ledgers so that we can initiate a
        // shard store database import.
        for (int i = 0; i < env.app().getShardStore()->ledgersPerShard() *
                 (numberOfShards + 1);
             ++i)
        {
            env.close();
        }

        auto shardStore = env.app().getShardStore();
        if (!BEAST_EXPECT(shardStore))
            return;

        {
            // Initiate a shard store import via the RPC
            // interface.

            Json::Value jvParams;
            jvParams[jss::action] = "start";

            auto const result = env.rpc(
                "json", "node_to_shard", to_string(jvParams))[jss::result];

            BEAST_EXPECT(
                result[jss::message] == "Database import initiated...");
        }

        {
            // Verify that the import is in progress with
            // the node_to_shard status RPC command

            Json::Value jvParams;
            jvParams[jss::action] = "status";

            auto const result = env.rpc(
                "json", "node_to_shard", to_string(jvParams))[jss::result];

            BEAST_EXPECT(
                result[jss::status] == "success" ||
                importCompleted(shardStore, numberOfShards, result));

            std::chrono::seconds const maxWait{10};
            auto const start = std::chrono::system_clock::now();

            while (shardStore->getShardInfo()->finalized().empty())
            {
                // Wait for at least one shard to complete

                if (std::this_thread::sleep_for(std::chrono::milliseconds{100});
                    std::chrono::system_clock::now() - start > maxWait)
                {
                    BEAST_EXPECTS(
                        false, "Import timeout: could just be a slow machine.");
                    break;
                }
            }
        }

        {
            Json::Value jvParams;
            jvParams[jss::action] = "stop";

            auto const result = env.rpc(
                "json", "node_to_shard", to_string(jvParams))[jss::result];

            BEAST_EXPECT(
                result[jss::message] == "Database import halt initiated..." ||
                importCompleted(shardStore, numberOfShards, result));
        }

        std::chrono::seconds const maxWait{10};
        auto const start = std::chrono::system_clock::now();

        while (true)
        {
            // Wait until we can verify that the import has
            // stopped

            Json::Value jvParams;
            jvParams[jss::action] = "status";

            auto const result = env.rpc(
                "json", "node_to_shard", to_string(jvParams))[jss::result];

            // When the import has stopped, polling the
            // status returns an error
            if (result.isMember(jss::error))
            {
                if (BEAST_EXPECT(result.isMember(jss::error_message)))
                {
                    BEAST_EXPECT(
                        result[jss::error_message] ==
                        "Database import not running");
                }

                break;
            }

            if (std::this_thread::sleep_for(std::chrono::milliseconds{100});
                std::chrono::system_clock::now() - start > maxWait)
            {
                BEAST_EXPECTS(
                    false, "Import timeout: could just be a slow machine.");
                break;
            }
        }
    }

    void
    run() override
    {
        testStart();
        testStop();
    }
};

BEAST_DEFINE_TESTSUITE(NodeToShardRPC, rpc, ripple);
}  // namespace test
}  // namespace ripple

src/test/rpc/RPCCall_test.cpp

@@ -4301,28 +4301,53 @@ static RPCCallTestData const rpcCallTestArray[] = {
    ]
    })"},

    // nodetoshard_status
    // node_to_shard
    // -------------------------------------------------------------------
    {"nodetoshard_status: minimal.",
    {"node_to_shard: status.",
     __LINE__,
     {
         "nodetoshard_status",
     },
     {"node_to_shard", "status"},
     RPCCallTestData::no_exception,
     R"({
    "method" : "nodetoshard_status",
    "method" : "node_to_shard",
    "params" : [
       {
          "api_version" : %MAX_API_VER%,
          "action" : "status"
       }
    ]
    })"},
    {"nodetoshard_status: too many arguments.",
    {"node_to_shard: start.",
     __LINE__,
     {"nodetoshard_status", "extra"},
     {"node_to_shard", "start"},
     RPCCallTestData::no_exception,
     R"({
    "method" : "nodetoshard_status",
    "method" : "node_to_shard",
    "params" : [
       {
          "api_version" : %MAX_API_VER%,
          "action" : "start"
       }
    ]
    })"},
    {"node_to_shard: stop.",
     __LINE__,
     {"node_to_shard", "stop"},
     RPCCallTestData::no_exception,
     R"({
    "method" : "node_to_shard",
    "params" : [
       {
          "api_version" : %MAX_API_VER%,
          "action" : "stop"
       }
    ]
    })"},
    {"node_to_shard: too many arguments.",
     __LINE__,
     {"node_to_shard", "start", "stop"},
     RPCCallTestData::no_exception,
     R"({
    "method" : "node_to_shard",
    "params" : [
       {
          "error" : "badSyntax",

@@ -4331,6 +4356,19 @@ static RPCCallTestData const rpcCallTestArray[] = {
       }
    ]
    })"},
    {"node_to_shard: invalid argument.",
     __LINE__,
     {"node_to_shard", "invalid"},
     RPCCallTestData::no_exception,
     R"({
    "method" : "node_to_shard",
    "params" : [
       {
          "api_version" : %MAX_API_VER%,
          "action" : "invalid"
       }
    ]
    })"},

    // owner_info
    // ------------------------------------------------------------------