Merge pull request #8 from ledhed2222/gw/more-docs

more docs improvements and possible step 2 speed increase
Author: ledhed2222
Committed by: GitHub
Date: 2023-05-05 04:14:52 -04:00
2 changed files with 74 additions and 42 deletions


@@ -15,20 +15,20 @@ to the above-referenced PR, so it is very safe to drop.
 This tool should be used as follows, with regard to the above update:
-1. __Stop serving requests from your clio__ If you need to achieve zero downtime, you have two options:
-   - Temporarily point your traffic to someone else's clio that has already performed this
+1. __Compile or download the new version of `clio`__, but don't run it just yet.
+2. __Stop serving requests from your existing `clio`__. If you need to achieve zero downtime, you have two options:
+   - Temporarily point your traffic to someone else's `clio` that has already performed this
      migration. The XRPL Foundation should have performed this on their servers before this
      release. Ask in our Discord what server to point traffic to.
    - Create a new temporary `clio` instance running _the prior release_ and make sure
      that its config.json specifies `read_only: true`. You can safely serve data
      from this separate instance.
-2. __Stop your clio and upgrade it to the version after the above PR__
-3. __Start your clio__ Now, your clio is writing new data correctly. This tool will update your
-   old data, while your new clio is running and writing new ledgers.
+3. __Stop your `clio` and restart it, running the new version__. Now, your `clio` is writing new data correctly. This tool will update your
+   old data, while your upgraded `clio` is running and writing new ledgers.
 5. __Run this tool__, using the _exact_ same config as what you are using for your
-   production clio.
+   production `clio`.
 6. __Once this tool terminates successfully__, you can resume serving requests
-   from your clio.
+   from your `clio`.
 ## Notes on timing
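For the temporary read-only instance described in the list above, the one setting that matters is `read_only`. A minimal sketch of such a config.json; every field other than `read_only` is illustrative of a typical Cassandra-backed `clio` setup, not a complete or authoritative config:

```json
{
  "database": {
    "type": "cassandra",
    "cassandra": {
      "contact_points": "127.0.0.1",
      "keyspace": "clio"
    }
  },
  "read_only": true
}
```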


@@ -83,7 +83,7 @@ doTryFetchLedgerPage(
 {
     try
     {
-        return backend.fetchLedgerPage(cursor, sequence, 2000, false, yield);
+        return backend.fetchLedgerPage(cursor, sequence, 10000, false, yield);
     }
     catch (Backend::DatabaseTimeout const& e)
     {
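The raised page size (from 2000 to 10000) is presumably the "possible step 2 speed increase" from the commit message: fewer round trips to Cassandra per pass over the initial ledger. The rest of this wrapper is truncated by the hunk; its retry-on-timeout shape is roughly the following sketch, in which the one-second backoff is an assumption:

```cpp
// Sketch of the retry wrapper around fetchLedgerPage (body truncated in the
// hunk above). On a database timeout it waits on the caller's timer without
// blocking the io_context, then retries; the backoff duration is assumed.
static Backend::LedgerPage
doTryFetchLedgerPage(
    boost::asio::steady_timer& timer,
    Backend::CassandraBackend& backend,
    std::optional<ripple::uint256> const& cursor,
    std::uint32_t sequence,
    boost::asio::yield_context& yield)
{
    while (true)
    {
        try
        {
            return backend.fetchLedgerPage(cursor, sequence, 10000, false, yield);
        }
        catch (Backend::DatabaseTimeout const&)
        {
            timer.expires_after(std::chrono::seconds(1));
            timer.async_wait(yield);  // suspend this coroutine, then retry
        }
    }
}
```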
@@ -118,26 +118,12 @@ doTryGetTxPageResult(
 }
 
 static void
-doMigration(
+doMigrationStepOne(
     Backend::CassandraBackend& backend,
     boost::asio::steady_timer& timer,
-    boost::asio::yield_context& yield)
+    boost::asio::yield_context& yield,
+    Backend::LedgerRange const& ledgerRange)
 {
-    BOOST_LOG_TRIVIAL(info) << "Beginning migration";
-    auto const ledgerRange = backend.hardFetchLedgerRangeNoThrow(yield);
-
-    /*
-     * Step 0 - If we haven't downloaded the initial ledger yet, just short
-     * circuit.
-     */
-    if (!ledgerRange)
-    {
-        BOOST_LOG_TRIVIAL(info) << "There is no data to migrate";
-        return;
-    }
-
-    std::vector<NFTsData> toWrite;
-
     /*
      * Step 1 - Look at all NFT transactions recorded in
      * `nf_token_transactions` and reload any NFTokenMint transactions. These
@@ -146,6 +132,9 @@ doMigration(
      * the tokens in `nf_tokens` because we also want to cover the extreme
      * edge case of a token that is re-minted with a different URI.
      */
+    std::string const stepTag = "Step 1 - transaction loading";
+    std::vector<NFTsData> toWrite;
+
     std::stringstream query;
     query << "SELECT hash FROM " << backend.tablePrefix()
           << "nf_token_transactions";
@@ -184,11 +173,12 @@ doMigration(
             txHashes.push_back(ripple::uint256::fromVoid(buf));
         }
 
-        auto txs = doTryFetchTransactions(timer, backend, txHashes, yield);
+        auto const txs =
+            doTryFetchTransactions(timer, backend, txHashes, yield);
         for (auto const& tx : txs)
         {
-            if (tx.ledgerSequence > ledgerRange->maxSequence)
+            if (tx.ledgerSequence > ledgerRange.maxSequence)
                 continue;
 
             ripple::STTx const sttx{ripple::SerialIter{
@@ -202,7 +192,7 @@ doMigration(
                 std::get<1>(getNFTDataFromTx(txMeta, sttx)).value());
         }
-        toWrite = maybeDoNFTWrite(toWrite, backend, "TX");
+        toWrite = maybeDoNFTWrite(toWrite, backend, stepTag);
 
         morePages = cass_result_has_more_pages(result);
         if (morePages)
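`maybeDoNFTWrite` and `doNFTWrite` are not part of this diff. Judging from the call sites, a plausible shape is a buffered flush: write once the accumulated rows cross a threshold, and hand back an empty buffer so the caller can keep appending. A sketch, with the threshold and exact signatures as assumptions:

```cpp
// Flush `toWrite` through doNFTWrite once it is large enough; otherwise keep
// accumulating. (Threshold and signatures are assumptions based on the call
// sites in this diff.)
static std::vector<NFTsData>
maybeDoNFTWrite(
    std::vector<NFTsData>& toWrite,
    Backend::CassandraBackend& backend,
    std::string const& tag)
{
    if (toWrite.size() < 10000)
        return std::move(toWrite);  // not enough rows yet; keep buffering
    return doNFTWrite(toWrite, backend, tag);  // writes, logs `tag`, returns {}
}
```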
@@ -212,9 +202,16 @@ doMigration(
     }
 
     cass_statement_free(nftTxQuery);
-    toWrite = doNFTWrite(toWrite, backend, "TX");
-    BOOST_LOG_TRIVIAL(info) << "\nDone with transaction loading!\n";
+    doNFTWrite(toWrite, backend, stepTag);
+}
+
+static void
+doMigrationStepTwo(
+    Backend::CassandraBackend& backend,
+    boost::asio::steady_timer& timer,
+    boost::asio::yield_context& yield,
+    Backend::LedgerRange const& ledgerRange)
+{
     /*
      * Step 2 - Pull every object from our initial ledger and load all NFTs
      * found in any NFTokenPage object. Prior to this migration, we were not
@@ -222,38 +219,43 @@ doMigration(
      * missed. This will also record the URI of any NFTs minted prior to the
      * start sequence.
      */
+    std::string const stepTag = "Step 2 - initial ledger loading";
+    std::vector<NFTsData> toWrite;
     std::optional<ripple::uint256> cursor;
 
     // For each object page in initial ledger
     do
     {
         auto const page = doTryFetchLedgerPage(
-            timer, backend, cursor, ledgerRange->minSequence, yield);
+            timer, backend, cursor, ledgerRange.minSequence, yield);
 
-        // For each object in page of 2000
+        // For each object in page
         for (auto const& object : page.objects)
         {
-            std::vector<NFTsData> objectNFTs = getNFTDataFromObj(
-                ledgerRange->minSequence,
+            auto const objectNFTs = getNFTDataFromObj(
+                ledgerRange.minSequence,
                 std::string(object.key.begin(), object.key.end()),
                 std::string(object.blob.begin(), object.blob.end()));
             toWrite.insert(toWrite.end(), objectNFTs.begin(), objectNFTs.end());
         }
 
-        toWrite = maybeDoNFTWrite(toWrite, backend, "OBJ");
+        toWrite = maybeDoNFTWrite(toWrite, backend, stepTag);
         cursor = page.cursor;
     } while (cursor.has_value());
-    toWrite = doNFTWrite(toWrite, backend, "OBJ");
-    BOOST_LOG_TRIVIAL(info) << "\nDone with object loading!\n";
+    doNFTWrite(toWrite, backend, stepTag);
+}
+
+static bool
+doMigrationStepThree(Backend::CassandraBackend& backend)
+{
     /*
      * Step 3 - Drop the old `issuer_nf_tokens` table, which is replaced by
      * `issuer_nf_tokens_v2`. Normally, we should probably not drop old tables
      * in migrations, but here it is safe since the old table wasn't yet being
      * used to serve any data anyway.
      */
-    query.str("");
+    std::stringstream query;
     query << "DROP TABLE " << backend.tablePrefix() << "issuer_nf_tokens";
     CassStatement* issuerDropTableQuery =
         cass_statement_new(query.str().c_str(), 0);
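`getNFTDataFromObj` is also outside this diff. Conceptually it deserializes each initial-ledger object and, when the object is an NFTokenPage, reads the token ID and URI out of every entry. A sketch of that extraction using rippled's serialization API; this is not the literal clio implementation, and the packaging into `NFTsData` records is omitted:

```cpp
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/Serializer.h>
#include <string>
#include <utility>
#include <vector>

// Extract (tokenID, URI) pairs from a serialized ledger object if it is an
// NFTokenPage; otherwise return nothing. The real getNFTDataFromObj packages
// these into NFTsData records keyed to the initial ledger sequence.
static std::vector<std::pair<ripple::uint256, ripple::Blob>>
extractPageTokens(std::string const& key, std::string const& blob)
{
    std::vector<std::pair<ripple::uint256, ripple::Blob>> tokens;
    ripple::SLE const sle{
        ripple::SerialIter{blob.data(), blob.size()},
        ripple::uint256::fromVoid(key.data())};
    if (sle.getType() != ripple::ltNFTOKEN_PAGE)
        return tokens;  // not an NFT page; nothing to extract
    for (auto const& token : sle.getFieldArray(ripple::sfNFTokens))
        tokens.emplace_back(
            token.getFieldH256(ripple::sfNFTokenID),
            token.getFieldVL(ripple::sfURI));
    return tokens;
}
```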
@@ -263,12 +265,42 @@ doMigration(
     cass_future_free(fut);
     cass_statement_free(issuerDropTableQuery);
     backend.sync();
-    if (rc != CASS_OK)
-        BOOST_LOG_TRIVIAL(warning) << "\nCould not drop old issuer_nf_tokens "
+    return rc == CASS_OK;
+}
+
+static void
+doMigration(
+    Backend::CassandraBackend& backend,
+    boost::asio::steady_timer& timer,
+    boost::asio::yield_context& yield)
+{
+    BOOST_LOG_TRIVIAL(info) << "Beginning migration";
+    auto const ledgerRange = backend.hardFetchLedgerRangeNoThrow(yield);
+
+    /*
+     * Step 0 - If we haven't downloaded the initial ledger yet, just short
+     * circuit.
+     */
+    if (!ledgerRange)
+    {
+        BOOST_LOG_TRIVIAL(info) << "There is no data to migrate";
+        return;
+    }
+
+    doMigrationStepOne(backend, timer, yield, *ledgerRange);
+    BOOST_LOG_TRIVIAL(info) << "\nStep 1 done!\n";
+    doMigrationStepTwo(backend, timer, yield, *ledgerRange);
+    BOOST_LOG_TRIVIAL(info) << "\nStep 2 done!\n";
+    auto const stepThreeResult = doMigrationStepThree(backend);
+    BOOST_LOG_TRIVIAL(info) << "\nStep 3 done!";
+    if (stepThreeResult)
+        BOOST_LOG_TRIVIAL(info) << "Dropped old 'issuer_nf_tokens' table!\n";
+    else
+        BOOST_LOG_TRIVIAL(warning) << "Could not drop old issuer_nf_tokens "
                                       "table. If it still exists, "
                                       "you should drop it yourself\n";
-    else
-        BOOST_LOG_TRIVIAL(info) << "\nDropped old 'issuer_nf_tokens' table!\n";
 
     BOOST_LOG_TRIVIAL(info)
         << "\nCompleted migration from " << ledgerRange->minSequence << " to "