rippled
DatabaseShardImp.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2017 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/app/ledger/InboundLedgers.h>
21 #include <ripple/app/ledger/LedgerMaster.h>
22 #include <ripple/app/misc/NetworkOPs.h>
23 #include <ripple/app/rdb/backend/SQLiteDatabase.h>
24 #include <ripple/basics/ByteUtilities.h>
25 #include <ripple/basics/RangeSet.h>
26 #include <ripple/basics/chrono.h>
27 #include <ripple/basics/random.h>
28 #include <ripple/core/ConfigSections.h>
29 #include <ripple/nodestore/DummyScheduler.h>
30 #include <ripple/nodestore/impl/DatabaseShardImp.h>
31 #include <ripple/overlay/Overlay.h>
32 #include <ripple/overlay/predicates.h>
33 #include <ripple/protocol/HashPrefix.h>
34 #include <ripple/protocol/LedgerHeader.h>
35 #include <ripple/protocol/digest.h>
36 
37 #include <boost/algorithm/string/predicate.hpp>
38 
39 #if BOOST_OS_LINUX
40 #include <sys/statvfs.h>
41 #endif
42 
43 namespace ripple {
44 
45 namespace NodeStore {
46 
// Constructor: forwards the scheduler, read-thread count, and the
// [shard_db] config section to the DatabaseShard base, then caches the
// Application reference, an estimated average shard file size
// (ledgersPerShard_ * 192 KB), and the open-final-shard limit from the
// sized-item configuration.
// NOTE(review): this listing is missing original lines 47 and 51 —
// presumably the "DatabaseShardImp::DatabaseShardImp(" signature line
// and the trailing journal parameter; confirm against the repository.
48  Application& app,
49  Scheduler& scheduler,
50  int readThreads,
52  : DatabaseShard(
53  scheduler,
54  readThreads,
55  app.config().section(ConfigSection::shardDatabase()),
56  j)
57  , app_(app)
// Rough per-shard disk footprint estimate used for storage-space checks.
58  , avgShardFileSz_(ledgersPerShard_ * kilobytes(192ull))
59  , openFinalLimit_(
60  app.config().getValueFor(SizedItem::openFinalLimit, std::nullopt))
61 {
// Shards are not supported when the server runs in reporting mode;
// constructing this object in that mode is a hard configuration error.
62  if (app.config().reporting())
63  {
64  Throw<std::runtime_error>(
65  "Attempted to create DatabaseShardImp in reporting mode. Reporting "
66  "does not support shards. Remove shards info from config");
67  }
68 }
69 
// Initializes the shard store. Under the mutex: validates configuration,
// ensures every configured storage path exists and is a directory, starts
// the NuDB context, then scans each path for numerically-named shard
// directories and registers each shard that initializes successfully.
// Returns false on any configuration, filesystem, or state error.
// NOTE(review): the listing drops several original lines — at least the
// function signature (line 71, presumably "DatabaseShardImp::init()"),
// the condition guarding the historical-path check (line 114), part of a
// streamed log message (line 152), and the switch case labels at lines
// 181 and 186-188 (presumably ShardState::finalized / complete with a
// finalizeShard(...) call). Confirm against the repository before
// editing this function.
70 bool
72 {
73  {
74  std::lock_guard lock(mutex_);
// Guard against double initialization.
75  if (init_)
76  {
77  JLOG(j_.error()) << "already initialized";
78  return false;
79  }
80 
81  if (!initConfig(lock))
82  {
83  JLOG(j_.error()) << "invalid configuration file settings";
84  return false;
85  }
86 
87  try
88  {
89  using namespace boost::filesystem;
90 
91  // Consolidate the main storage path and all historical paths
92  std::vector<path> paths{dir_};
93  paths.insert(
94  paths.end(), historicalPaths_.begin(), historicalPaths_.end());
95 
// Every configured path must either already be a directory or be
// creatable; anything else aborts initialization.
96  for (auto const& path : paths)
97  {
98  if (exists(path))
99  {
100  if (!is_directory(path))
101  {
102  JLOG(j_.error()) << path << " must be a directory";
103  return false;
104  }
105  }
106  else if (!create_directories(path))
107  {
108  JLOG(j_.error())
109  << "failed to create path: " + path.string();
110  return false;
111  }
112  }
113 
// NOTE(review): the condition that opened this scope (original line
// 114) is missing from the listing.
115  {
116  // Check historical paths for duplicated file systems
117  if (!checkHistoricalPaths(lock))
118  return false;
119  }
120 
121  ctx_ = std::make_unique<nudb::context>();
122  ctx_->start();
123 
124  // Find shards
125  std::uint32_t openFinals{0};
126  for (auto const& path : paths)
127  {
128  for (auto const& it : directory_iterator(path))
129  {
130  // Ignore files
131  if (!is_directory(it))
132  continue;
133 
134  // Ignore nonnumerical directory names
135  auto const shardDir{it.path()};
136  auto dirName{shardDir.stem().string()};
137  if (!std::all_of(
138  dirName.begin(), dirName.end(), [](auto c) {
139  return ::isdigit(static_cast<unsigned char>(c));
140  }))
141  {
142  continue;
143  }
144 
145  // Ignore values below the earliest shard index
146  auto const shardIndex{std::stoul(dirName)};
147  if (shardIndex < earliestShardIndex_)
148  {
149  JLOG(j_.debug())
150  << "shard " << shardIndex
151  << " ignored, comes before earliest shard index "
153  continue;
154  }
155 
156  // Check if a previous database import failed
157  if (is_regular_file(shardDir / databaseImportMarker_))
158  {
// A leftover marker file means the node-store import never
// completed; the partial shard is discarded.
159  JLOG(j_.warn())
160  << "shard " << shardIndex
161  << " previously failed database import, removing";
162  remove_all(shardDir);
163  continue;
164  }
165 
166  auto shard{std::make_shared<Shard>(
167  app_, *this, shardIndex, shardDir.parent_path(), j_)};
168  if (!shard->init(scheduler_, *ctx_))
169  {
170  // Remove corrupted or legacy shard
171  shard->removeOnDestroy();
172  JLOG(j_.warn())
173  << "shard " << shardIndex << " removed, "
174  << (shard->isLegacy() ? "legacy" : "corrupted")
175  << " shard";
176  continue;
177  }
178 
179  switch (shard->getState())
180  {
// NOTE(review): a case label (original line 181, presumably
// ShardState::finalized) is missing from the listing.
// Close finalized shards beyond the configured open limit.
182  if (++openFinals > openFinalLimit_)
183  shard->tryClose();
184  shards_.emplace(shardIndex, std::move(shard));
185  break;
186 
// NOTE(review): a case label and the start of a finalizeShard(...)
// call (original lines 187-188) are missing from the listing.
189  shards_.emplace(shardIndex, std::move(shard))
190  .first->second,
191  true,
192  std::nullopt);
193  break;
194 
195  case ShardState::acquire:
// Only a single shard may be in the acquire state at a time.
196  if (acquireIndex_ != 0)
197  {
198  JLOG(j_.error())
199  << "more than one shard being acquired";
200  return false;
201  }
202 
203  shards_.emplace(shardIndex, std::move(shard));
204  acquireIndex_ = shardIndex;
205  break;
206 
207  default:
208  JLOG(j_.error())
209  << "shard " << shardIndex << " invalid state";
210  return false;
211  }
212  }
213  }
214  }
215  catch (std::exception const& e)
216  {
217  JLOG(j_.fatal()) << "Exception caught in function " << __func__
218  << ". Error: " << e.what();
219  return false;
220  }
221 
222  init_ = true;
223  }
224 
// File statistics are refreshed outside the lock.
225  updateFileStats();
226  return true;
227 }
228 
// Prepares the next ledger sequence to acquire. If a shard is already
// being acquired, delegates to that shard's prepare(); otherwise picks a
// new shard index via findAcquireIndex, creates and initializes a Shard
// for it (on a historical path if required), registers it as the acquire
// shard, and returns the first ledger sequence to fetch. Returns
// std::nullopt when nothing can be acquired.
// NOTE(review): the listing is missing original lines 229-230 —
// presumably the return type and signature, e.g.
// "std::optional<std::uint32_t>
//  DatabaseShardImp::prepareLedger(std::uint32_t validLedgerSeq)".
231 {
232  std::optional<std::uint32_t> shardIndex;
233 
234  {
235  std::lock_guard lock(mutex_);
236  assert(init_);
237 
// An acquire already in progress: continue with that shard.
238  if (acquireIndex_ != 0)
239  {
240  if (auto const it{shards_.find(acquireIndex_)}; it != shards_.end())
241  return it->second->prepare();
242 
243  // Should never get here
244  assert(false);
245  return std::nullopt;
246  }
247 
248  if (!canAdd_)
249  return std::nullopt;
250 
251  shardIndex = findAcquireIndex(validLedgerSeq, lock);
252  }
253 
254  if (!shardIndex)
255  {
// No candidate index: permanently stop trying to add shards.
256  JLOG(j_.debug()) << "no new shards to add";
257  {
258  std::lock_guard lock(mutex_);
259  canAdd_ = false;
260  }
261  return std::nullopt;
262  }
263 
// Decide whether the new shard goes to the main or a historical path.
264  auto const pathDesignation = [this, shardIndex = *shardIndex]() {
265  std::lock_guard lock(mutex_);
266  return prepareForNewShard(shardIndex, numHistoricalShards(lock), lock);
267  }();
268 
269  if (!pathDesignation)
270  return std::nullopt;
271 
272  auto const needsHistoricalPath =
273  *pathDesignation == PathDesignation::historical;
274 
275  auto shard = [this, shardIndex, needsHistoricalPath] {
276  std::lock_guard lock(mutex_);
277  return std::make_unique<Shard>(
278  app_,
279  *this,
280  *shardIndex,
281  (needsHistoricalPath ? chooseHistoricalPath(lock) : ""),
282  j_);
283  }();
284 
285  if (!shard->init(scheduler_, *ctx_))
286  return std::nullopt;
287 
// prepare() is called before publishing the shard in shards_.
288  auto const ledgerSeq{shard->prepare()};
289  {
290  std::lock_guard lock(mutex_);
291  shards_.emplace(*shardIndex, std::move(shard));
292  acquireIndex_ = *shardIndex;
// Advertise the updated shard set to peers.
293  updatePeers(lock);
294  }
295 
296  return ledgerSeq;
297 }
298 
// Validates and reserves a set of shard indexes for import by the shard
// archive handler. Rejects indexes that are out of range, already
// stored, already queued, or being imported from the node store, and
// verifies the historical-shard cap and available disk space before
// recording the indexes in preparedIndexes_ and notifying peers.
// Returns true when every index was accepted.
// NOTE(review): the listing is missing original lines 300 (signature,
// presumably taking "std::vector<std::uint32_t> const& shardIndexes"),
// 307 (presumably "std::transform("), 341 (presumably completing the
// error string with std::to_string), 356 (presumably a second seqCheck
// on the current ledger index), and 369 (presumably
// "if (databaseImportStatus_)"). Confirm against the repository.
291 bool
301 {
// Shared failure helper: logs either "shard N" or a joined list of all
// requested indexes, followed by the message, and returns false.
302  auto fail = [j = j_, &shardIndexes](
303  std::string const& msg,
304  std::optional<std::uint32_t> shardIndex = std::nullopt) {
305  auto multipleIndexPrequel = [&shardIndexes] {
306  std::vector<std::string> indexesAsString(shardIndexes.size());
308  shardIndexes.begin(),
309  shardIndexes.end(),
310  indexesAsString.begin(),
311  [](uint32_t const index) { return std::to_string(index); });
312 
313  return std::string("shard") +
314  (shardIndexes.size() > 1 ? "s " : " ") +
315  boost::algorithm::join(indexesAsString, ", ");
316  };
317 
318  JLOG(j.error()) << (shardIndex ? "shard " + std::to_string(*shardIndex)
319  : multipleIndexPrequel())
320  << " " << msg;
321  return false;
322  };
323 
324  if (shardIndexes.empty())
325  return fail("invalid shard indexes");
326 
327  std::lock_guard lock(mutex_);
328  assert(init_);
329 
330  if (!canAdd_)
331  return fail("cannot be stored at this time");
332 
333  auto historicalShardsToPrepare = 0;
334 
335  for (auto const shardIndex : shardIndexes)
336  {
337  if (shardIndex < earliestShardIndex_)
338  {
339  return fail(
340  "comes before earliest shard index " +
342  shardIndex);
343  }
344 
345  // If we are synced to the network, check if the shard index is
346  // greater or equal to the current or validated shard index.
347  auto seqCheck = [&](std::uint32_t ledgerSeq) {
348  if (ledgerSeq >= earliestLedgerSeq_ &&
349  shardIndex >= seqToShardIndex(ledgerSeq))
350  {
351  return fail("invalid index", shardIndex);
352  }
353  return true;
354  };
355  if (!seqCheck(app_.getLedgerMaster().getValidLedgerIndex() + 1) ||
357  {
358  return fail("invalid index", shardIndex);
359  }
360 
361  if (shards_.find(shardIndex) != shards_.end())
362  return fail("is already stored", shardIndex);
363 
364  if (preparedIndexes_.find(shardIndex) != preparedIndexes_.end())
365  return fail(
366  "is already queued for import from the shard archive handler",
367  shardIndex);
368 
// NOTE(review): the guard that opened this scope (original line 369)
// is missing from the listing.
370  {
371  if (auto shard = databaseImportStatus_->currentShard.lock(); shard)
372  {
373  if (shard->index() == shardIndex)
374  return fail(
375  "is being imported from the nodestore", shardIndex);
376  }
377  }
378 
379  // Any shard earlier than the two most recent shards
380  // is a historical shard
381  if (shardIndex < shardBoundaryIndex())
382  ++historicalShardsToPrepare;
383  }
384 
385  auto const numHistShards = numHistoricalShards(lock);
386 
387  // Check shard count and available storage space
388  if (numHistShards + historicalShardsToPrepare > maxHistoricalShards_)
389  return fail("maximum number of historical shards reached");
390 
391  if (historicalShardsToPrepare)
392  {
393  // Check available storage space for historical shards
394  if (!sufficientStorage(
395  historicalShardsToPrepare, PathDesignation::historical, lock))
396  return fail("insufficient storage space available");
397  }
398 
399  if (auto const recentShardsToPrepare =
400  shardIndexes.size() - historicalShardsToPrepare;
401  recentShardsToPrepare)
402  {
403  // Check available storage space for recent shards
404  if (!sufficientStorage(
405  recentShardsToPrepare, PathDesignation::none, lock))
406  return fail("insufficient storage space available");
407  }
408 
// All checks passed: reserve every index and notify peers.
409  for (auto const shardIndex : shardIndexes)
410  preparedIndexes_.emplace(shardIndex);
411 
412  updatePeers(lock);
413  return true;
414 }
415 
// Removes a shard index from the set reserved for archive-handler
// import; peers are notified only if the index was actually present.
// NOTE(review): the listing is missing original line 417 — presumably
// "DatabaseShardImp::removePreShard(std::uint32_t shardIndex)".
416 void
418 {
419  std::lock_guard lock(mutex_);
420  assert(init_);
421 
422  if (preparedIndexes_.erase(shardIndex))
423  updatePeers(lock);
424 }
425 
// Returns the indexes reserved for archive-handler import as a
// range-set string (e.g. "1-2,5"), or an empty string if none.
// NOTE(review): the listing is missing original lines 426-427
// (presumably "std::string" / "DatabaseShardImp::getPreShards()") and
// 429 (presumably "RangeSet<std::uint32_t> rs;").
428 {
430  {
431  std::lock_guard lock(mutex_);
432  assert(init_);
433 
434  for (auto const& shardIndex : preparedIndexes_)
435  rs.insert(shardIndex);
436  }
437 
438  if (rs.empty())
439  return {};
440 
441  return ripple::to_string(rs);
442 };
443 
// Imports a complete shard from srcDir: validates the source directory
// and the expected final ledger hash, picks a destination (main or
// historical path), renames the directory into place, constructs and
// initializes the Shard, and finalizes it. On any failure the reserved
// index is released, peers are notified, and (where applicable) the
// directory is renamed back. Returns true on success.
// NOTE(review): the listing is missing original lines 445 (signature,
// presumably "DatabaseShardImp::importShard(") and 466/474/478/514
// (presumably the std::lock_guard(mutex_) arguments closing the fail()
// calls, and the lastLedgerSeq-based walkHashBySeq argument). Confirm
// against the repository.
444 bool
446  std::uint32_t shardIndex,
447  boost::filesystem::path const& srcDir)
448 {
// Failure helper: logs, releases the reserved index so the import can
// be retried, notifies peers, and returns false. Requires the mutex.
449  auto fail = [&](std::string const& msg,
450  std::lock_guard<std::mutex> const& lock) {
451  JLOG(j_.error()) << "shard " << shardIndex << " " << msg;
452 
453  // Remove the failed import shard index so it can be retried
454  preparedIndexes_.erase(shardIndex);
455  updatePeers(lock);
456  return false;
457  };
458 
459  using namespace boost::filesystem;
460  try
461  {
462  if (!is_directory(srcDir) || is_empty(srcDir))
463  {
464  return fail(
465  "invalid source directory " + srcDir.string(),
467  }
468  }
469  catch (std::exception const& e)
470  {
471  return fail(
472  std::string(". Exception caught in function ") + __func__ +
473  ". Error: " + e.what(),
475  }
476 
// The network-validated hash of the shard's last ledger; required to
// finalize the imported shard.
477  auto const expectedHash{app_.getLedgerMaster().walkHashBySeq(
479  if (!expectedHash)
480  return fail("expected hash not found", std::lock_guard(mutex_));
481 
482  path dstDir;
483  {
484  std::lock_guard lock(mutex_);
485  if (shards_.find(shardIndex) != shards_.end())
486  return fail("already exists", lock);
487 
488  // Check shard was prepared for import
489  if (preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
490  return fail("was not prepared for import", lock);
491 
492  auto const pathDesignation{
493  prepareForNewShard(shardIndex, numHistoricalShards(lock), lock)};
494  if (!pathDesignation)
495  return fail("failed to import", lock);
496 
497  if (*pathDesignation == PathDesignation::historical)
498  dstDir = chooseHistoricalPath(lock);
499  else
500  dstDir = dir_;
501  }
502  dstDir /= std::to_string(shardIndex);
503 
// Rename helper; also used to move the directory back on failure.
504  auto renameDir = [&, fname = __func__](path const& src, path const& dst) {
505  try
506  {
507  rename(src, dst);
508  }
509  catch (std::exception const& e)
510  {
511  return fail(
512  std::string(". Exception caught in function ") + fname +
513  ". Error: " + e.what(),
515  }
516  return true;
517  };
518 
519  // Rename source directory to the shard database directory
520  if (!renameDir(srcDir, dstDir))
521  return false;
522 
523  // Create the new shard
524  auto shard{std::make_unique<Shard>(
525  app_, *this, shardIndex, dstDir.parent_path(), j_)};
526 
527  if (!shard->init(scheduler_, *ctx_) ||
528  shard->getState() != ShardState::complete)
529  {
// Undo the rename so the caller's source directory is restored.
530  shard.reset();
531  renameDir(dstDir, srcDir);
532  return fail("failed to import", std::lock_guard(mutex_));
533  }
534 
535  auto const [it, inserted] = [&]() {
536  std::lock_guard lock(mutex_);
537  preparedIndexes_.erase(shardIndex);
538  return shards_.emplace(shardIndex, std::move(shard));
539  }();
540 
541  if (!inserted)
542  {
543  shard.reset();
544  renameDir(dstDir, srcDir);
545  return fail("failed to import", std::lock_guard(mutex_));
546  }
547 
// Finalize with the validated hash; writes the shard's final key.
548  finalizeShard(it->second, true, expectedHash);
549  return true;
550 }
551 
// Fetches and fully deserializes a ledger from the shard store: locates
// the owning shard (which must be final or, if acquiring, must contain
// the sequence), fetches the ledger header node object, builds the
// Ledger, and verifies sequence, hash, and the state/transaction map
// roots. Returns nullptr on any failure.
// NOTE(review): the listing is missing original lines 552-553
// (presumably "std::shared_ptr<Ledger>
//  DatabaseShardImp::fetchLedger(uint256 const& hash, std::uint32_t
//  ledgerSeq)"), 557 (presumably "std::shared_ptr<Shard> shard;"), and
// the case label at 571 (presumably "case ShardState::finalized:").
554 {
555  auto const shardIndex{seqToShardIndex(ledgerSeq)};
556  {
558  {
559  std::lock_guard lock(mutex_);
560  assert(init_);
561 
562  auto const it{shards_.find(shardIndex)};
563  if (it == shards_.end())
564  return nullptr;
565  shard = it->second;
566  }
567 
568  // Ledger must be stored in a final or acquiring shard
569  switch (shard->getState())
570  {
572  break;
573  case ShardState::acquire:
574  if (shard->containsLedger(ledgerSeq))
575  break;
576  [[fallthrough]];
577  default:
578  return nullptr;
579  }
580  }
581 
// Fetch the serialized ledger header via the base-class node fetch.
582  auto const nodeObject{Database::fetchNodeObject(hash, ledgerSeq)};
583  if (!nodeObject)
584  return nullptr;
585 
586  auto fail = [&](std::string const& msg) -> std::shared_ptr<Ledger> {
587  JLOG(j_.error()) << "shard " << shardIndex << " " << msg;
588  return nullptr;
589  };
590 
591  auto ledger{std::make_shared<Ledger>(
592  deserializePrefixedHeader(makeSlice(nodeObject->getData())),
593  app_.config(),
594  *app_.getShardFamily())};
595 
// Sanity-check the deserialized header against the request.
596  if (ledger->info().seq != ledgerSeq)
597  {
598  return fail(
599  "encountered invalid ledger sequence " + std::to_string(ledgerSeq));
600  }
601  if (ledger->info().hash != hash)
602  {
603  return fail(
604  "encountered invalid ledger hash " + to_string(hash) +
605  " on sequence " + std::to_string(ledgerSeq));
606  }
607 
608  ledger->setFull();
// Both SHAMap roots must be resolvable for the ledger to be usable.
609  if (!ledger->stateMap().fetchRoot(
610  SHAMapHash{ledger->info().accountHash}, nullptr))
611  {
612  return fail(
613  "is missing root STATE node on hash " + to_string(hash) +
614  " on sequence " + std::to_string(ledgerSeq));
615  }
616 
617  if (ledger->info().txHash.isNonZero())
618  {
619  if (!ledger->txMap().fetchRoot(
620  SHAMapHash{ledger->info().txHash}, nullptr))
621  {
622  return fail(
623  "is missing root TXN node on hash " + to_string(hash) +
624  " on sequence " + std::to_string(ledgerSeq));
625  }
626  }
627  return ledger;
628 }
629 
// Records a fully-acquired ledger in the shard currently being
// acquired. Validates the ledger's hashes and maps, confirms the
// ledger's shard is the acquire shard, then marks it stored via
// setStoredInShard. Silently returns (with a log entry) on any
// validation failure.
// NOTE(review): the listing is missing original lines 631 (signature,
// presumably "DatabaseShardImp::setStored(std::shared_ptr<Ledger const>
// const& ledger)") and 661 (presumably
// "std::shared_ptr<Shard> shard;").
630 void
632 {
633  auto const ledgerSeq{ledger->info().seq};
634  if (ledger->info().hash.isZero())
635  {
636  JLOG(j_.error()) << "zero ledger hash for ledger sequence "
637  << ledgerSeq;
638  return;
639  }
640  if (ledger->info().accountHash.isZero())
641  {
642  JLOG(j_.error()) << "zero account hash for ledger sequence "
643  << ledgerSeq;
644  return;
645  }
646  if (ledger->stateMap().getHash().isNonZero() &&
647  !ledger->stateMap().isValid())
648  {
649  JLOG(j_.error()) << "invalid state map for ledger sequence "
650  << ledgerSeq;
651  return;
652  }
653  if (ledger->info().txHash.isNonZero() && !ledger->txMap().isValid())
654  {
655  JLOG(j_.error()) << "invalid transaction map for ledger sequence "
656  << ledgerSeq;
657  return;
658  }
659 
660  auto const shardIndex{seqToShardIndex(ledgerSeq)};
662  {
663  std::lock_guard lock(mutex_);
664  assert(init_);
665 
// The ledger is only accepted for the shard currently being acquired.
666  if (shardIndex != acquireIndex_)
667  {
668  JLOG(j_.trace())
669  << "shard " << shardIndex << " is not being acquired";
670  return;
671  }
672 
673  auto const it{shards_.find(shardIndex)};
674  if (it == shards_.end())
675  {
676  JLOG(j_.error())
677  << "shard " << shardIndex << " is not being acquired";
678  return;
679  }
680  shard = it->second;
681  }
682 
683  if (shard->containsLedger(ledgerSeq))
684  {
685  JLOG(j_.trace()) << "shard " << shardIndex << " ledger already stored";
686  return;
687  }
688 
689  setStoredInShard(shard, ledger);
690 }
691 
// Public accessor: returns the current shard info snapshot, taking the
// mutex and delegating to the locked getShardInfo overload.
// NOTE(review): the listing is missing original lines 692-693 — the
// return type and signature; confirm against the repository.
694 {
695  std::lock_guard lock(mutex_);
696  return getShardInfo(lock);
697 }
698 
// Shuts down the shard store: stops the base Database read threads,
// stops and releases every shard, stops the task queue, warns about any
// shard still alive, then halts an in-progress node-store import and
// waits for the import thread to finish.
// NOTE(review): the listing is missing original lines 700 (signature,
// presumably "DatabaseShardImp::stop()"), 704 (presumably
// "std::vector<std::weak_ptr<Shard>> shards;"), 730/740 (presumably
// "if (databaseImportStatus_)" / a joinable check on the import
// thread), 752 (presumably a short sleep between polls), and 760-761
// (presumably the databaseImporter_ join). Confirm against the
// repository.
699 void
701 {
702  // Stop read threads in base before data members are destroyed
703  Database::stop();
705  {
706  std::lock_guard lock(mutex_);
707  shards.reserve(shards_.size());
// Keep weak references so we can verify shards expire after clear().
708  for (auto const& [_, shard] : shards_)
709  {
710  shards.push_back(shard);
711  shard->stop();
712  }
713  shards_.clear();
714  }
715  taskQueue_.stop();
716 
717  // All shards should be expired at this point
718  for (auto const& wptr : shards)
719  {
720  if (auto const shard{wptr.lock()})
721  {
722  JLOG(j_.warn()) << " shard " << shard->index() << " unexpired";
723  }
724  }
725 
726  std::unique_lock lock(mutex_);
727 
728  // Notify the shard being imported
729  // from the node store to stop
731  {
732  // A node store import is in progress
733  if (auto importShard = databaseImportStatus_->currentShard.lock();
734  importShard)
735  importShard->stop();
736  }
737 
738  // Wait for the node store import thread
739  // if necessary
741  {
742  // Tells the import function to halt
743  haltDatabaseImport_ = true;
744 
745  // Wait for the function to exit
746  while (databaseImportStatus_)
747  {
748  // Unlock just in case the import
749  // function is waiting on the mutex
750  lock.unlock();
751 
753  lock.lock();
754  }
755 
756  // Calling join while holding the mutex_ without
757  // first making sure that doImportDatabase has
758  // exited could lead to deadlock via the mutex
759  // acquisition that occurs in that function
762  }
763 }
764 
// Starts importing ledgers from the application's node store into
// shards. Asserts the source is the local node store and that no import
// is already running, then launches the import.
// NOTE(review): the listing is missing original lines 766 (signature,
// presumably "DatabaseShardImp::importDatabase(Database& source)"), 774
// (presumably "if (databaseImporter_.joinable())"), and 781 (presumably
// "startDatabaseImportThread(lock);"). Confirm against the repository.
765 void
767 {
768  std::lock_guard lock(mutex_);
769  assert(init_);
770 
771  // Only the application local node store can be imported
772  assert(&source == &app_.getNodeStore());
773 
775  {
776  assert(false);
777  JLOG(j_.error()) << "database import already in progress";
778  return;
779  }
780 
782 }
783 
// Worker that imports complete shards from the local SQLite/node-store
// databases. Determines the earliest and latest fully-covered shard
// indexes from the SQLite ledger range, then for each index: skips
// shards already stored/acquired/queued, verifies every SQLite ledger
// hash exists in the node store, creates a Shard (marked with a
// temporary import-marker file), copies each ledger in, writes the
// shard final key, and finalizes. Cooperatively halts whenever
// haltDatabaseImport_ is set or the server is stopping; failed shards
// are removed on destruction.
// NOTE(review): the listing is missing a number of original lines —
// 785 (signature, presumably "DatabaseShardImp::doImportDatabase()"),
// 798/800 (presumably the ledger/LedgerInfo declarations in
// loadLedger), 827 (part of the earliest-ledger handling), 927
// (presumably the SQLite getHashesByIndex call), and 956 (presumably
// the chooseHistoricalPath call for the historical branch). Confirm
// against the repository before editing.
784 void
786 {
// True when the import should stop; also consumes (resets) the halt
// flag via compare_exchange.
787  auto shouldHalt = [this] {
788  bool expected = true;
789  return haltDatabaseImport_.compare_exchange_strong(expected, false) ||
790  isStopping();
791  };
792 
793  if (shouldHalt())
794  return;
795 
// Loads the oldest ("asc") or newest ("desc") ledger available in the
// SQLite database; returns its sequence, or nullopt if none usable.
796  auto loadLedger =
797  [this](char const* const sortOrder) -> std::optional<std::uint32_t> {
799  std::uint32_t ledgerSeq{0};
801  if (sortOrder == std::string("asc"))
802  {
803  info = dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase())
804  ->getLimitedOldestLedgerInfo(earliestLedgerSeq());
805  }
806  else
807  {
808  info = dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase())
809  ->getLimitedNewestLedgerInfo(earliestLedgerSeq());
810  }
811  if (info)
812  {
813  ledger = loadLedgerHelper(*info, app_, false);
814  ledgerSeq = info->seq;
815  }
816  if (!ledger || ledgerSeq == 0)
817  {
818  JLOG(j_.error()) << "no suitable ledgers were found in"
819  " the SQLite database to import";
820  return std::nullopt;
821  }
822  return ledgerSeq;
823  };
824 
825  // Find earliest ledger sequence stored
826  auto const earliestLedgerSeq{loadLedger("asc")};
827  if (!earliestLedgerSeq)
828  return;
829 
830  auto const earliestIndex = [&] {
831  auto earliestIndex = seqToShardIndex(*earliestLedgerSeq);
832 
833  // Consider only complete shards
834  if (earliestLedgerSeq != firstLedgerSeq(earliestIndex))
835  ++earliestIndex;
836 
837  return earliestIndex;
838  }();
839 
840  // Find last ledger sequence stored
841  auto const latestLedgerSeq = loadLedger("desc");
842  if (!latestLedgerSeq)
843  return;
844 
845  auto const latestIndex = [&] {
846  auto latestIndex = seqToShardIndex(*latestLedgerSeq);
847 
848  // Consider only complete shards
849  if (latestLedgerSeq != lastLedgerSeq(latestIndex))
850  --latestIndex;
851 
852  return latestIndex;
853  }();
854 
855  if (latestIndex < earliestIndex)
856  {
857  JLOG(j_.error()) << "no suitable ledgers were found in"
858  " the SQLite database to import";
859  return;
860  }
861 
862  JLOG(j_.debug()) << "Importing ledgers for shards " << earliestIndex
863  << " through " << latestIndex;
864 
865  {
866  std::lock_guard lock(mutex_);
867 
// Publish progress state; stop() polls this to know when we finish.
868  assert(!databaseImportStatus_);
869  databaseImportStatus_ = std::make_unique<DatabaseImportStatus>(
870  earliestIndex, latestIndex, 0);
871  }
872 
873  // Import the shards
874  for (std::uint32_t shardIndex = earliestIndex; shardIndex <= latestIndex;
875  ++shardIndex)
876  {
877  if (shouldHalt())
878  return;
879 
880  auto const pathDesignation = [this, shardIndex] {
881  std::lock_guard lock(mutex_);
882 
883  auto const numHistShards = numHistoricalShards(lock);
884  auto const pathDesignation =
885  prepareForNewShard(shardIndex, numHistShards, lock);
886 
887  return pathDesignation;
888  }();
889 
890  if (!pathDesignation)
891  break;
892 
893  {
894  std::lock_guard lock(mutex_);
895 
896  // Skip if being acquired
897  if (shardIndex == acquireIndex_)
898  {
899  JLOG(j_.debug())
900  << "shard " << shardIndex << " already being acquired";
901  continue;
902  }
903 
904  // Skip if being imported from the shard archive handler
905  if (preparedIndexes_.find(shardIndex) != preparedIndexes_.end())
906  {
907  JLOG(j_.debug())
908  << "shard " << shardIndex << " already being imported";
909  continue;
910  }
911 
912  // Skip if stored
913  if (shards_.find(shardIndex) != shards_.end())
914  {
915  JLOG(j_.debug()) << "shard " << shardIndex << " already stored";
916  continue;
917  }
918  }
919 
920  std::uint32_t const firstSeq = firstLedgerSeq(shardIndex);
921  std::uint32_t const lastSeq =
922  std::max(firstSeq, lastLedgerSeq(shardIndex));
923 
924  // Verify SQLite ledgers are in the node store
925  {
926  auto const ledgerHashes{
928  firstSeq, lastSeq)};
929  if (ledgerHashes.size() != maxLedgers(shardIndex))
930  continue;
931 
932  auto& source = app_.getNodeStore();
933  bool valid{true};
934 
935  for (std::uint32_t n = firstSeq; n <= lastSeq; ++n)
936  {
937  if (!source.fetchNodeObject(ledgerHashes.at(n).ledgerHash, n))
938  {
939  JLOG(j_.warn()) << "SQLite ledger sequence " << n
940  << " mismatches node store";
941  valid = false;
942  break;
943  }
944  }
945  if (!valid)
946  continue;
947  }
948 
949  if (shouldHalt())
950  return;
951 
952  bool const needsHistoricalPath =
953  *pathDesignation == PathDesignation::historical;
954 
955  auto const path = needsHistoricalPath
957  : dir_;
958 
959  // Create the new shard
960  auto shard{std::make_shared<Shard>(app_, *this, shardIndex, path, j_)};
961  if (!shard->init(scheduler_, *ctx_))
962  continue;
963 
964  {
965  std::lock_guard lock(mutex_);
966 
967  if (shouldHalt())
968  return;
969 
970  databaseImportStatus_->currentIndex = shardIndex;
971  databaseImportStatus_->currentShard = shard;
972  databaseImportStatus_->firstSeq = firstSeq;
973  databaseImportStatus_->lastSeq = lastSeq;
974  }
975 
976  // Create a marker file to signify a database import in progress
977  auto const shardDir{path / std::to_string(shardIndex)};
978  auto const markerFile{shardDir / databaseImportMarker_};
979  {
980  std::ofstream ofs{markerFile.string()};
981  if (!ofs.is_open())
982  {
983  JLOG(j_.error()) << "shard " << shardIndex
984  << " failed to create temp marker file";
985  shard->removeOnDestroy();
986  continue;
987  }
988  }
989 
990  // Copy the ledgers from node store
991  std::shared_ptr<Ledger> recentStored;
992  std::optional<uint256> lastLedgerHash;
993 
994  while (auto const ledgerSeq = shard->prepare())
995  {
996  if (shouldHalt())
997  return;
998 
999  // Not const so it may be moved later
1000  auto ledger{loadByIndex(*ledgerSeq, app_, false)};
1001  if (!ledger || ledger->info().seq != ledgerSeq)
1002  break;
1003 
1004  auto const result{shard->storeLedger(ledger, recentStored)};
1005  storeStats(result.count, result.size);
1006  if (result.error)
1007  break;
1008 
1009  if (!shard->setLedgerStored(ledger))
1010  break;
1011 
// Remember the hash of the shard's last ledger for the final key.
1012  if (!lastLedgerHash && ledgerSeq == lastSeq)
1013  lastLedgerHash = ledger->info().hash;
1014 
1015  recentStored = std::move(ledger);
1016  }
1017 
1018  if (shouldHalt())
1019  return;
1020 
1021  using namespace boost::filesystem;
1022  bool success{false};
1023  if (lastLedgerHash && shard->getState() == ShardState::complete)
1024  {
1025  // Store shard final key
1026  Serializer s;
1027  s.add32(Shard::version);
1028  s.add32(firstLedgerSeq(shardIndex));
1029  s.add32(lastLedgerSeq(shardIndex));
1030  s.addBitString(*lastLedgerHash);
1031  auto const nodeObject{NodeObject::createObject(
1032  hotUNKNOWN, std::move(s.modData()), Shard::finalKey)};
1033 
1034  if (shard->storeNodeObject(nodeObject))
1035  {
1036  try
1037  {
1038  std::lock_guard lock(mutex_);
1039 
1040  // The database import process is complete and the
1041  // marker file is no longer required
1042  remove_all(markerFile);
1043 
1044  JLOG(j_.debug()) << "shard " << shardIndex
1045  << " was successfully imported"
1046  " from the NodeStore";
1047  finalizeShard(
1048  shards_.emplace(shardIndex, std::move(shard))
1049  .first->second,
1050  true,
1051  std::nullopt);
1052 
1053  // This variable is meant to capture the success
1054  // of everything up to the point of shard finalization.
1055  // If the shard fails to finalize, this condition will
1056  // be handled by the finalization function itself, and
1057  // not here.
1058  success = true;
1059  }
1060  catch (std::exception const& e)
1061  {
1062  JLOG(j_.fatal()) << "shard index " << shardIndex
1063  << ". Exception caught in function "
1064  << __func__ << ". Error: " << e.what();
1065  }
1066  }
1067  }
1068 
1069  if (!success)
1070  {
1071  JLOG(j_.error()) << "shard " << shardIndex
1072  << " failed to import from the NodeStore";
1073 
1074  if (shard)
1075  shard->removeOnDestroy();
1076  }
1077  }
1078 
1079  if (shouldHalt())
1080  return;
1081 
1082  updateFileStats();
1083 }
1084 
// Returns the write load of the shard currently being acquired, or 0 if
// no acquire shard is registered.
// NOTE(review): the listing is missing original lines 1085-1086 — the
// return type and signature (presumably
// "std::int32_t DatabaseShardImp::getWriteLoad() const").
1087 {
1088  std::shared_ptr<Shard> shard;
1089  {
1090  std::lock_guard lock(mutex_);
1091  assert(init_);
1092 
1093  auto const it{shards_.find(acquireIndex_)};
1094  if (it == shards_.end())
1095  return 0;
1096  shard = it->second;
1097  }
1098 
// Queried outside the lock on a copied shared_ptr.
1099  return shard->getWriteLoad();
1100 }
1101 
// Stores a single node object into the shard being acquired. The
// target shard is resolved from the ledger sequence; objects for any
// other shard are ignored (traced/logged).
// NOTE(review): the listing is missing original line 1103 — presumably
// the qualified name "DatabaseShardImp::store(".
1102 void
1104  NodeObjectType type,
1105  Blob&& data,
1106  uint256 const& hash,
1107  std::uint32_t ledgerSeq)
1108 {
1109  auto const shardIndex{seqToShardIndex(ledgerSeq)};
1110  std::shared_ptr<Shard> shard;
1111  {
1112  std::lock_guard lock(mutex_);
// Only objects belonging to the acquire shard are accepted.
1113  if (shardIndex != acquireIndex_)
1114  {
1115  JLOG(j_.trace())
1116  << "shard " << shardIndex << " is not being acquired";
1117  return;
1118  }
1119 
1120  auto const it{shards_.find(shardIndex)};
1121  if (it == shards_.end())
1122  {
1123  JLOG(j_.error())
1124  << "shard " << shardIndex << " is not being acquired";
1125  return;
1126  }
1127  shard = it->second;
1128  }
1129 
1130  auto const nodeObject{
1131  NodeObject::createObject(type, std::move(data), hash)};
1132  if (shard->storeNodeObject(nodeObject))
1133  storeStats(1, nodeObject->getData().size());
1134 }
1135 
// Stores an entire source ledger into the shard being acquired and,
// on success, marks the ledger stored via setStoredInShard. Returns
// false if the ledger's shard is not the acquire shard or the store
// produced no data.
// NOTE(review): the listing is missing original line 1137 — presumably
// the signature "DatabaseShardImp::storeLedger(
//   std::shared_ptr<Ledger const> const& srcLedger)".
1136 bool
1138 {
1139  auto const ledgerSeq{srcLedger->info().seq};
1140  auto const shardIndex{seqToShardIndex(ledgerSeq)};
1141  std::shared_ptr<Shard> shard;
1142  {
1143  std::lock_guard lock(mutex_);
1144  assert(init_);
1145 
1146  if (shardIndex != acquireIndex_)
1147  {
1148  JLOG(j_.trace())
1149  << "shard " << shardIndex << " is not being acquired";
1150  return false;
1151  }
1152 
1153  auto const it{shards_.find(shardIndex)};
1154  if (it == shards_.end())
1155  {
1156  JLOG(j_.error())
1157  << "shard " << shardIndex << " is not being acquired";
1158  return false;
1159  }
1160  shard = it->second;
1161  }
1162 
1163  auto const result{shard->storeLedger(srcLedger, nullptr)};
1164  storeStats(result.count, result.size);
// Treat an empty store as failure even without an explicit error.
1165  if (result.error || result.count == 0 || result.size == 0)
1166  return false;
1167 
1168  return setStoredInShard(shard, srcLedger);
1169 }
1170 
// Periodic maintenance: collects all open finalized shards and, if
// their count exceeds openFinalLimit_, closes the least-recently-used
// ones until the count is within the limit (or no more can be closed).
// NOTE(review): the listing is missing original lines 1172 (signature,
// presumably "DatabaseShardImp::sweep()"), 1174 (presumably
// "std::vector<std::weak_ptr<Shard>> shards;"), and 1184 (presumably
// "std::vector<std::shared_ptr<Shard>> openFinals;").
1171 void
1173 {
1175  {
1176  std::lock_guard lock(mutex_);
1177  assert(init_);
1178 
1179  shards.reserve(shards_.size());
1180  for (auto const& e : shards_)
1181  shards.push_back(e.second);
1182  }
1183 
1185  openFinals.reserve(openFinalLimit_);
1186 
1187  for (auto const& weak : shards)
1188  {
1189  if (auto const shard{weak.lock()}; shard && shard->isOpen())
1190  {
1191  if (shard->getState() == ShardState::finalized)
1192  openFinals.emplace_back(std::move(shard));
1193  }
1194  }
1195 
1196  if (openFinals.size() > openFinalLimit_)
1197  {
1198  JLOG(j_.trace()) << "Open shards exceed configured limit of "
1199  << openFinalLimit_ << " by "
1200  << (openFinals.size() - openFinalLimit_);
1201 
1202  // Try to close enough shards to be within the limit.
1203  // Sort ascending on last use so the oldest are removed first.
1204  std::sort(
1205  openFinals.begin(),
1206  openFinals.end(),
1207  [&](std::shared_ptr<Shard> const& lhsShard,
1208  std::shared_ptr<Shard> const& rhsShard) {
1209  return lhsShard->getLastUse() < rhsShard->getLastUse();
1210  });
1211 
// tryClose may fail for in-use shards; those are skipped (++it).
1212  for (auto it{openFinals.cbegin()};
1213  it != openFinals.cend() && openFinals.size() > openFinalLimit_;)
1214  {
1215  if ((*it)->tryClose())
1216  it = openFinals.erase(it);
1217  else
1218  ++it;
1219  }
1220  }
1221 }
1222 
// Returns a JSON snapshot of the node-store import progress (first/
// last/current shard index and the current shard's stored sequences),
// or an rpcINTERNAL error if no import is running.
// NOTE(review): the listing is missing original lines 1224 (signature,
// presumably "DatabaseShardImp::getDatabaseImportStatus() const"),
// 1226 (presumably "if (databaseImportStatus_)"), and 1228 (presumably
// "Json::Value ret(Json::objectValue);").
1225 {
1227  {
1229 
1230  ret[jss::firstShardIndex] = databaseImportStatus_->earliestIndex;
1231  ret[jss::lastShardIndex] = databaseImportStatus_->latestIndex;
1232  ret[jss::currentShardIndex] = databaseImportStatus_->currentIndex;
1233 
1234  Json::Value currentShard(Json::objectValue);
1235  currentShard[jss::firstSequence] = databaseImportStatus_->firstSeq;
1236  currentShard[jss::lastSequence] = databaseImportStatus_->lastSeq;
1237 
1238  if (auto shard = databaseImportStatus_->currentShard.lock(); shard)
1239  currentShard[jss::storedSeqs] = shard->getStoredSeqs();
1240 
1241  ret[jss::currentShard] = currentShard;
1242 
1243  if (haltDatabaseImport_)
1244  ret[jss::message] = "Database import halt initiated...";
1245 
1246  return ret;
1247  }
1248 
1249  return RPC::make_error(rpcINTERNAL, "Database import not running");
1250 }
1251 
// RPC entry point: starts a node-store-to-shard import. Returns an
// rpcINTERNAL error if the store is uninitialized, an import is
// already running, or the node is shutting down; otherwise reports
// that the import was initiated.
// NOTE(review): the listing is missing original lines 1253 (signature,
// presumably "DatabaseShardImp::startNodeToShard()"), 1260 (presumably
// the joinable/in-progress condition), and 1267-1269 (presumably the
// thread start and "Json::Value result(Json::objectValue);"). Confirm
// against the repository.
1254 {
1255  std::lock_guard lock(mutex_);
1256 
1257  if (!init_)
1258  return RPC::make_error(rpcINTERNAL, "Shard store not initialized");
1259 
1261  return RPC::make_error(
1262  rpcINTERNAL, "Database import already in progress");
1263 
1264  if (isStopping())
1265  return RPC::make_error(rpcINTERNAL, "Node is shutting down");
1266 
1268 
1270  result[jss::message] = "Database import initiated...";
1271 
1272  return result;
1273 }
1274 
// Json::Value DatabaseShardImp::stopNodeToShard()
// Requests termination of a running NodeStore-to-ShardStore import by setting
// haltDatabaseImport_; the import thread is expected to observe the flag.
// NOTE(review): the signature and 'result' construction were dropped by the
// extraction.
1277 {
1278  std::lock_guard lock(mutex_);
1279
1280  if (!init_)
1281  return RPC::make_error(rpcINTERNAL, "Shard store not initialized");
1282
     // joinable() implies the import thread was started and not yet detached.
1283  if (!databaseImporter_.joinable())
1284  return RPC::make_error(rpcINTERNAL, "Database import not running");
1285
1286  if (isStopping())
1287  return RPC::make_error(rpcINTERNAL, "Node is shutting down");
1288
1289  haltDatabaseImport_ = true;
1290
1292  result[jss::message] = "Database import halt initiated...";
1293
1294  return result;
1295 }
1296
// std::optional<std::uint32_t> DatabaseShardImp::getDatabaseImportSequence()
// Returns the first ledger sequence of the shard currently being imported
// from the NodeStore, or nullopt when no import is running.
// NOTE(review): signature line dropped by the extraction.
1299 {
1300  std::lock_guard lock(mutex_);
1301
1302  if (!databaseImportStatus_)
1303  return {};
1304
1305  return databaseImportStatus_->firstSeq;
1306 }
1307 
1308 bool
// DatabaseShardImp::initConfig(...) — validates the [shard_db] configuration:
// cross-checks ledgers_per_shard/earliest_seq against [node_db], requires a
// 'path', rejects duplicate/overlapping historical paths, and accepts only
// the NuDB backend. Returns false (after logging) on any violation.
// NOTE(review): the signature and the historicalPaths_ append (orig line
// 1370) were dropped by the extraction.
1310 {
     // Shared error reporter: logs under the [shard_db] section tag.
1311  auto fail = [j = j_](std::string const& msg) {
1312  JLOG(j.error()) << "[" << ConfigSection::shardDatabase() << "] " << msg;
1313  return false;
1314  };
1315
1316  Config const& config{app_.config()};
1317  Section const& section{config.section(ConfigSection::shardDatabase())};
1318
     // True when [shard_db] and [node_db] agree on 'name' (both fall back to
     // defaultValue when unset).
1319  auto compare = [&](std::string const& name, std::uint32_t defaultValue) {
1320  std::uint32_t shardDBValue{defaultValue};
1321  get_if_exists<std::uint32_t>(section, name, shardDBValue);
1322
1323  std::uint32_t nodeDBValue{defaultValue};
1324  get_if_exists<std::uint32_t>(
1325  config.section(ConfigSection::nodeDatabase()), name, nodeDBValue);
1326
1327  return shardDBValue == nodeDBValue;
1328  };
1329
1330  // If ledgers_per_shard or earliest_seq are specified,
1331  // they must be equally assigned in 'node_db'
1332  if (!compare("ledgers_per_shard", DEFAULT_LEDGERS_PER_SHARD))
1333  {
1334  return fail(
1335  "and [" + ConfigSection::nodeDatabase() + "] define different '" +
1336  "ledgers_per_shard" + "' values");
1337  }
1338  if (!compare("earliest_seq", XRP_LEDGER_EARLIEST_SEQ))
1339  {
1340  return fail(
1341  "and [" + ConfigSection::nodeDatabase() + "] define different '" +
1342  "earliest_seq" + "' values");
1343  }
1344
1345  using namespace boost::filesystem;
1346  if (!get_if_exists<path>(section, "path", dir_))
1347  return fail("'path' missing");
1348
1349  {
1350  get_if_exists(section, "max_historical_shards", maxHistoricalShards_);
1351
1352  Section const& historicalShardPaths =
1353  config.section(SECTION_HISTORICAL_SHARD_PATHS);
1354
1355  auto values = historicalShardPaths.values();
1356
     // Deduplicate configured historical paths (sort + unique-erase).
1357  std::sort(values.begin(), values.end());
1358  values.erase(std::unique(values.begin(), values.end()), values.end());
1359
1360  for (auto const& s : values)
1361  {
1362  auto const dir = path(s);
     // The main shard path must not double as a historical path.
1363  if (dir_ == dir)
1364  {
1365  return fail(
1366  "the 'path' cannot also be in the "
1367  "'historical_shard_path' section");
1368  }
1369
     // NOTE(review): the line storing 'dir' into historicalPaths_ was
     // dropped here by the extraction.
1371  }
1372  }
1373
1374  // NuDB is the default and only supported permanent storage backend
1375  backendName_ = get(section, "type", "nudb");
1376  if (!boost::iequals(backendName_, "NuDB"))
1377  return fail("'type' value unsupported");
1378
1379  return true;
1380 }
1381 
1384  uint256 const& hash,
1385  std::uint32_t ledgerSeq,
1386  FetchReport& fetchReport,
1387  bool duplicate)
1388 {
1389  auto const shardIndex{seqToShardIndex(ledgerSeq)};
1390  std::shared_ptr<Shard> shard;
1391  {
1392  std::lock_guard lock(mutex_);
1393  auto const it{shards_.find(shardIndex)};
1394  if (it == shards_.end())
1395  return nullptr;
1396  shard = it->second;
1397  }
1398 
1399  return shard->fetchNodeObject(hash, fetchReport);
1400 }
1401 
1404  std::uint32_t validLedgerSeq,
1406 {
1407  if (validLedgerSeq < earliestLedgerSeq_)
1408  return std::nullopt;
1409 
1410  auto const maxShardIndex{[this, validLedgerSeq]() {
1411  auto shardIndex{seqToShardIndex(validLedgerSeq)};
1412  if (validLedgerSeq != lastLedgerSeq(shardIndex))
1413  --shardIndex;
1414  return shardIndex;
1415  }()};
1416  auto const maxNumShards{maxShardIndex - earliestShardIndex_ + 1};
1417 
1418  // Check if the shard store has all shards
1419  if (shards_.size() >= maxNumShards)
1420  return std::nullopt;
1421 
1422  if (maxShardIndex < 1024 ||
1423  static_cast<float>(shards_.size()) / maxNumShards > 0.5f)
1424  {
1425  // Small or mostly full index space to sample
1426  // Find the available indexes and select one at random
1428  available.reserve(maxNumShards - shards_.size());
1429 
1430  for (auto shardIndex = earliestShardIndex_; shardIndex <= maxShardIndex;
1431  ++shardIndex)
1432  {
1433  if (shards_.find(shardIndex) == shards_.end() &&
1434  preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
1435  {
1436  available.push_back(shardIndex);
1437  }
1438  }
1439 
1440  if (available.empty())
1441  return std::nullopt;
1442 
1443  if (available.size() == 1)
1444  return available.front();
1445 
1446  return available[rand_int(
1447  0u, static_cast<std::uint32_t>(available.size() - 1))];
1448  }
1449 
1450  // Large, sparse index space to sample
1451  // Keep choosing indexes at random until an available one is found
1452  // chances of running more than 30 times is less than 1 in a billion
1453  for (int i = 0; i < 40; ++i)
1454  {
1455  auto const shardIndex{rand_int(earliestShardIndex_, maxShardIndex)};
1456  if (shards_.find(shardIndex) == shards_.end() &&
1457  preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
1458  {
1459  return shardIndex;
1460  }
1461  }
1462 
1463  assert(false);
1464  return std::nullopt;
1465 }
1466 
1466 void
// DatabaseShardImp::finalizeShard(shard, writeSQLite, expectedHash)
// Queues a background task that finalizes (verifies/flushes) the shard, then
// classifies it as historical or recent, relocates outdated shards, notifies
// peers, and refreshes file statistics. Failure removes the shard.
// NOTE(review): the signature's opening line was dropped by the extraction.
1469  std::shared_ptr<Shard>& shard,
1470  bool const writeSQLite,
1471  std::optional<uint256> const& expectedHash)
1472 {
     // Capture a weak_ptr so a shard removed before the task runs is simply
     // skipped rather than kept alive by the queue.
1473  taskQueue_.addTask([this,
1474  wptr = std::weak_ptr<Shard>(shard),
1475  writeSQLite,
1476  expectedHash]() {
1477  if (isStopping())
1478  return;
1479
1480  auto shard{wptr.lock()};
1481  if (!shard)
1482  {
1483  JLOG(j_.debug()) << "Shard removed before being finalized";
1484  return;
1485  }
1486
1487  if (!shard->finalize(writeSQLite, expectedHash))
1488  {
1489  if (isStopping())
1490  return;
1491
1492  // Invalid or corrupt shard, remove it
1493  removeFailedShard(shard);
1494  return;
1495  }
1496
1497  if (isStopping())
1498  return;
1499
1500  {
     // Compute the boundary before taking the mutex; shardBoundaryIndex()
     // consults the ledger master, not this database's state.
1501  auto const boundaryIndex{shardBoundaryIndex()};
1502  std::lock_guard lock(mutex_);
1503
1504  if (shard->index() < boundaryIndex)
1505  {
1506  // This is a historical shard
1507  if (!historicalPaths_.empty() &&
1508  shard->getDir().parent_path() == dir_)
1509  {
1510  // Shard wasn't placed at a separate historical path
1511  JLOG(j_.warn()) << "shard " << shard->index()
1512  << " is not stored at a historical path";
1513  }
1514  }
1515  else
1516  {
1517  // Not a historical shard. Shift recent shards if necessary
1518  assert(!boundaryIndex || shard->index() - boundaryIndex <= 1);
1519  relocateOutdatedShards(lock);
1520
1521  // Set the appropriate recent shard index
1522  if (shard->index() == boundaryIndex)
1523  secondLatestShardIndex_ = shard->index();
1524  else
1525  latestShardIndex_ = shard->index();
1526
     // Recent shards belong under the main path.
1527  if (shard->getDir().parent_path() != dir_)
1528  {
1529  JLOG(j_.warn()) << "shard " << shard->index()
1530  << " is not stored at the path";
1531  }
1532  }
1533
1534  updatePeers(lock);
1535  }
1536
1537  updateFileStats();
1538  });
1539 }
1540 
1541 void
// DatabaseShardImp::updateFileStats()
// Recomputes aggregate file size, required file descriptors, and the average
// shard file size across all shards, then disables further acquisition
// (canAdd_) when the historical-shard limit or storage capacity is reached.
// NOTE(review): the signature, the weak_ptr vector declaration, a guard on
// maxHistoricalShards_, and a sufficientStorage argument line were dropped
// by the extraction.
1543 {
1545  {
     // Snapshot the shard set under the lock; inspect shards outside it.
1546  std::lock_guard lock(mutex_);
1547  if (shards_.empty())
1548  return;
1549
1550  shards.reserve(shards_.size());
1551  for (auto const& e : shards_)
1552  shards.push_back(e.second);
1553  }
1554
1555  std::uint64_t sumSz{0};
1556  std::uint32_t sumFd{0};
1557  std::uint32_t numShards{0};
1558  for (auto const& weak : shards)
1559  {
1560  if (auto const shard{weak.lock()}; shard)
1561  {
1562  auto const [sz, fd] = shard->getFileInfo();
1563  sumSz += sz;
1564  sumFd += fd;
1565  ++numShards;
1566  }
1567  }
1568
1569  std::lock_guard lock(mutex_);
1570  fileSz_ = sumSz;
1571  fdRequired_ = sumFd;
1572  avgShardFileSz_ = (numShards == 0 ? fileSz_ : fileSz_ / numShards);
1573
1574  if (!canAdd_)
1575  return;
1576
1577  if (auto const count = numHistoricalShards(lock);
1578  count >= maxHistoricalShards_)
1579  {
1581  {
1582  // In order to avoid excessive output, don't produce
1583  // this warning if the server isn't configured to
1584  // store historical shards.
1585  JLOG(j_.warn()) << "maximum number of historical shards reached";
1586  }
1587
1588  canAdd_ = false;
1589  }
1590  else if (!sufficientStorage(
1591  maxHistoricalShards_ - count,
1593  lock))
1594  {
1595  JLOG(j_.warn())
1596  << "maximum shard store size exceeds available storage space";
1597
1598  canAdd_ = false;
1599  }
1600 }
1601 
1602 bool
// DatabaseShardImp::sufficientStorage(numShards, pathDesignation, lock)
// Estimates whether the designated storage (historical paths collectively,
// or the main path) can hold 'numShards' more shards of average size.
// Returns false on filesystem errors or insufficient space.
// NOTE(review): the signature's opening line and the historicalPaths_
// non-empty condition continuation were dropped by the extraction.
1604  std::uint32_t numShards,
1605  PathDesignation pathDesignation,
1606  std::lock_guard<std::mutex> const&) const
1607 {
1608  try
1609  {
1610  std::vector<std::uint64_t> capacities;
1611
1612  if (pathDesignation == PathDesignation::historical &&
1614  {
1615  capacities.reserve(historicalPaths_.size());
1616
1617  for (auto const& path : historicalPaths_)
1618  {
1619  // Get the available storage for each historical path
1620  auto const availableSpace =
1621  boost::filesystem::space(path).available;
1622
1623  capacities.push_back(availableSpace);
1624  }
1625  }
1626  else
1627  {
1628  // Get the available storage for the main shard path
1629  capacities.push_back(boost::filesystem::space(dir_).available);
1630  }
1631
1632  for (std::uint64_t const capacity : capacities)
1633  {
1634  // Leverage all the historical shard paths to
1635  // see if collectively they can fit the specified
1636  // number of shards. For this to work properly,
1637  // each historical path must correspond to a separate
1638  // physical device or filesystem.
1639
1640  auto const shardCap = capacity / avgShardFileSz_;
1641  if (numShards <= shardCap)
1642  return true;
1643
     // Remaining shards spill over to the next path.
1644  numShards -= shardCap;
1645  }
1646  }
1647  catch (std::exception const& e)
1648  {
1649  JLOG(j_.fatal()) << "Exception caught in function " << __func__
1650  << ". Error: " << e.what();
1651  return false;
1652  }
1653
1654  return false;
1655 }
1656 
1657 bool
// DatabaseShardImp::setStoredInShard(shard, ledger)
// Records a ledger as stored in 'shard'; removes the shard on failure. When
// the shard becomes complete, clears acquireIndex_ (if applicable) and
// queues finalization. Returns false only when the shard was invalid.
// NOTE(review): the signature's opening line was dropped by the extraction.
1659  std::shared_ptr<Shard>& shard,
1660  std::shared_ptr<Ledger const> const& ledger)
1661 {
1662  if (!shard->setLedgerStored(ledger))
1663  {
1664  // Invalid or corrupt shard, remove it
1665  removeFailedShard(shard);
1666  return false;
1667  }
1668
1669  if (shard->getState() == ShardState::complete)
1670  {
1671  std::lock_guard lock(mutex_);
     // Only finalize if the shard is still tracked; it may have been
     // removed concurrently.
1672  if (auto const it{shards_.find(shard->index())}; it != shards_.end())
1673  {
1674  if (shard->index() == acquireIndex_)
1675  acquireIndex_ = 0;
1676
1677  finalizeShard(it->second, false, std::nullopt);
1678  }
1679  else
1680  {
1681  JLOG(j_.debug())
1682  << "shard " << shard->index() << " is no longer being acquired";
1683  }
1684  }
1685
1686  updateFileStats();
1687  return true;
1688 }
1689 
1690 void
// DatabaseShardImp::removeFailedShard(shard)
// Detaches a failed shard from all bookkeeping (acquire/latest/second-latest
// indexes), marks its directory for deletion, destroys it, and refreshes
// file statistics.
// NOTE(review): the signature line was dropped by the extraction.
1692 {
1693  {
1694  std::lock_guard lock(mutex_);
1695
1696  if (shard->index() == acquireIndex_)
1697  acquireIndex_ = 0;
1698
1699  if (shard->index() == latestShardIndex_)
1700  latestShardIndex_ = std::nullopt;
1701
1702  if (shard->index() == secondLatestShardIndex_)
1703  secondLatestShardIndex_ = std::nullopt;
1704  }
1705
1706  shard->removeOnDestroy();
1707
1708  // Reset the shared_ptr to invoke the shard's
1709  // destructor and remove it from the server
1710  shard.reset();
1711  updateFileStats();
1712 }
1713 
// std::uint32_t DatabaseShardImp::shardBoundaryIndex() const
// Returns the index separating historical shards from the 'recent' ones
// (the two latest validated shards plus the in-progress shard); 0 when the
// validated ledger predates the earliest tracked ledger.
// NOTE(review): the return type and signature lines were dropped by the
// extraction.
1716 {
1717  auto const validIndex = app_.getLedgerMaster().getValidLedgerIndex();
1718
1719  if (validIndex < earliestLedgerSeq_)
1720  return 0;
1721
1722  // Shards with an index earlier than the recent shard boundary index
1723  // are considered historical. The three shards at or later than
1724  // this index consist of the two most recently validated shards
1725  // and the shard still in the process of being built by live
1726  // transactions.
1727  return seqToShardIndex(validIndex) - 1;
1728 }
1729 
// std::uint32_t DatabaseShardImp::numHistoricalShards(lock) const
// Counts stored shards whose index falls below the recent-shard boundary.
// Caller must hold mutex_ (enforced by the lock_guard parameter).
// NOTE(review): the return type and signature opening were dropped by the
// extraction.
1732  std::lock_guard<std::mutex> const& lock) const
1733 {
1734  auto const boundaryIndex{shardBoundaryIndex()};
1735  return std::count_if(
1736  shards_.begin(), shards_.end(), [boundaryIndex](auto const& entry) {
1737  return entry.first < boundaryIndex;
1738  });
1739 }
1740 
1741 void
// DatabaseShardImp::relocateOutdatedShards(lock)
// When a newer shard boundary has been published, demotes the tracked
// 'latest' / 'second latest' shards: each is either kept (and moved to a
// historical path when so configured) or removed if limits/space forbid
// keeping it. Caller must hold mutex_.
// NOTE(review): the signature, the latestShardIndex initializer's second
// line, and the keepShard guard conditions were dropped by the extraction.
1743  std::lock_guard<std::mutex> const& lock)
1744 {
1745  auto& cur{latestShardIndex_};
1746  auto& prev{secondLatestShardIndex_};
1747  if (!cur && !prev)
1748  return;
1749
1750  auto const latestShardIndex =
1752  auto const separateHistoricalPath = !historicalPaths_.empty();
1753
     // Remove a shard that cannot be retained; also stops further acquisition.
1754  auto const removeShard = [this](std::uint32_t const shardIndex) -> void {
1755  canAdd_ = false;
1756
1757  if (auto it = shards_.find(shardIndex); it != shards_.end())
1758  {
1759  if (it->second)
1760  removeFailedShard(it->second);
1761  else
1762  {
1763  JLOG(j_.warn()) << "can't find shard to remove";
1764  }
1765  }
1766  else
1767  {
1768  JLOG(j_.warn()) << "can't find shard to remove";
1769  }
1770  };
1771
     // Decide whether a demoted shard may be kept as a historical shard;
     // removes it (and returns false) when limits or space forbid keeping.
1772  auto const keepShard = [this, &lock, removeShard, separateHistoricalPath](
1773  std::uint32_t const shardIndex) -> bool {
1775  {
1776  JLOG(j_.error()) << "maximum number of historical shards reached";
1777  removeShard(shardIndex);
1778  return false;
1779  }
1780  if (separateHistoricalPath &&
1782  {
1783  JLOG(j_.error()) << "insufficient storage space available";
1784  removeShard(shardIndex);
1785  return false;
1786  }
1787
1788  return true;
1789  };
1790
1791  // Move a shard from the main shard path to a historical shard
1792  // path by copying the contents, and creating a new shard.
1793  auto const moveShard = [this,
1794  &lock](std::uint32_t const shardIndex) -> void {
1795  auto it{shards_.find(shardIndex)};
1796  if (it == shards_.end())
1797  {
1798  JLOG(j_.warn()) << "can't find shard to move to historical path";
1799  return;
1800  }
1801
1802  auto& shard{it->second};
1803
1804  // Close any open file descriptors before moving the shard
1805  // directory. Don't call removeOnDestroy since that would
1806  // attempt to close the fds after the directory has been moved.
1807  if (!shard->tryClose())
1808  {
1809  JLOG(j_.warn()) << "can't close shard to move to historical path";
1810  return;
1811  }
1812
1813  auto const dst{chooseHistoricalPath(lock)};
1814  try
1815  {
1816  // Move the shard directory to the new path
1817  boost::filesystem::rename(
1818  shard->getDir().string(), dst / std::to_string(shardIndex));
1819  }
1820  catch (...)
1821  {
1822  JLOG(j_.error()) << "shard " << shardIndex
1823  << " failed to move to historical storage";
1824  return;
1825  }
1826
1827  // Create a shard instance at the new location
1828  shard = std::make_shared<Shard>(app_, *this, shardIndex, dst, j_);
1829
1830  // Open the new shard
1831  if (!shard->init(scheduler_, *ctx_))
1832  {
1833  JLOG(j_.error()) << "shard " << shardIndex
1834  << " failed to open in historical storage";
1835  shard->removeOnDestroy();
1836  shard.reset();
1837  }
1838  };
1839
1840  // See if either of the recent shards needs to be updated
1841  bool const curNotSynched =
1842  latestShardIndex_ && *latestShardIndex_ != latestShardIndex;
1843  bool const prevNotSynched = secondLatestShardIndex_ &&
1844  *secondLatestShardIndex_ != latestShardIndex - 1;
1845
1846  // A new shard has been published. Move outdated
1847  // shards to historical storage as needed
1848  if (curNotSynched || prevNotSynched)
1849  {
1850  if (prev)
1851  {
1852  // Move the formerly second latest shard to historical storage
1853  if (keepShard(*prev) && separateHistoricalPath)
1854  moveShard(*prev);
1855
1856  prev = std::nullopt;
1857  }
1858
1859  if (cur)
1860  {
1861  // The formerly latest shard is now the second latest
1862  if (cur == latestShardIndex - 1)
1863  prev = cur;
1864
1865  // The formerly latest shard is no longer a 'recent' shard
1866  else
1867  {
1868  // Move the formerly latest shard to historical storage
1869  if (keepShard(*cur) && separateHistoricalPath)
1870  moveShard(*cur);
1871  }
1872
1873  cur = std::nullopt;
1874  }
1875  }
1876 }
1877 
1878 auto
// DatabaseShardImp::prepareForNewShard(shardIndex, numHistoricalShards, lock)
//     -> std::optional<PathDesignation>
// Decides where a new shard should live (historical path vs main path),
// returning nullopt — and disabling further acquisition — when the
// historical-shard limit or available storage would be exceeded.
// NOTE(review): the signature opening, the second parameter line, and the
// ternary arms choosing the designation were dropped by the extraction.
1880  std::uint32_t shardIndex,
1883 {
1884  // Any shard earlier than the two most recent shards is a historical shard
1885  auto const boundaryIndex{shardBoundaryIndex()};
1886  auto const isHistoricalShard = shardIndex < boundaryIndex;
1887
1888  auto const designation = isHistoricalShard && !historicalPaths_.empty()
1891
1892  // Check shard count and available storage space
1893  if (isHistoricalShard && numHistoricalShards >= maxHistoricalShards_)
1894  {
1895  JLOG(j_.error()) << "maximum number of historical shards reached";
1896  canAdd_ = false;
1897  return std::nullopt;
1898  }
1899  if (!sufficientStorage(1, designation, lock))
1900  {
1901  JLOG(j_.error()) << "insufficient storage space available";
1902  canAdd_ = false;
1903  return std::nullopt;
1904  }
1905
1906  return designation;
1907 }
1908 
1909 boost::filesystem::path
// DatabaseShardImp::chooseHistoricalPath(lock)
// Picks, uniformly at random, one configured historical path with at least
// an average shard's worth of free space; falls back to the main path when
// no historical paths are configured, and returns "" when none has room.
// NOTE(review): the signature line was dropped by the extraction.
1911 {
1912  // If not configured with separate historical paths,
1913  // use the main path (dir_) by default.
1914  if (historicalPaths_.empty())
1915  return dir_;
1916
1917  boost::filesystem::path historicalShardPath;
1918  std::vector<boost::filesystem::path> potentialPaths;
1919
1920  for (boost::filesystem::path const& path : historicalPaths_)
1921  {
1922  if (boost::filesystem::space(path).available >= avgShardFileSz_)
1923  potentialPaths.push_back(path);
1924  }
1925
1926  if (potentialPaths.empty())
1927  {
1928  JLOG(j_.error()) << "failed to select a historical shard path";
1929  return "";
1930  }
1931
     // std::sample with n=1 draws one path uniformly at random.
1932  std::sample(
1933  potentialPaths.begin(),
1934  potentialPaths.end(),
1935  &historicalShardPath,
1936  1,
1937  default_prng());
1938
1939  return historicalShardPath;
1940 }
1941 
1942 bool
// DatabaseShardImp::checkHistoricalPaths(lock) const
// Verifies that each configured historical path resides on a distinct
// device/filesystem. On Linux this is enforced via statvfs f_fsid (returns
// false on violation); elsewhere only a heuristic warning is issued when two
// paths report identical free-byte counts.
// NOTE(review): the signature and the declarations of the filesystemIDs /
// uniqueCapacities maps were dropped by the extraction.
1944 {
1945 #if BOOST_OS_LINUX
1946  // Each historical shard path must correspond
1947  // to a directory on a distinct device or file system.
1948  // Currently, this constraint is enforced only on Linux.
1951
1952  for (auto const& path : historicalPaths_)
1953  {
1954  struct statvfs buffer;
1955  if (statvfs(path.c_str(), &buffer))
1956  {
1957  JLOG(j_.error())
1958  << "failed to acquire stats for 'historical_shard_path': "
1959  << path;
1960  return false;
1961  }
1962
     // Group the paths by filesystem ID to detect duplicates.
1963  filesystemIDs[buffer.f_fsid].push_back(path.string());
1964  }
1965
1966  bool ret = true;
1967  for (auto const& entry : filesystemIDs)
1968  {
1969  // Check to see if any of the paths are stored on the same file system
1970  if (entry.second.size() > 1)
1971  {
1972  // Two or more historical storage paths
1973  // correspond to the same file system.
1974  JLOG(j_.error())
1975  << "The following paths correspond to the same filesystem: "
1976  << boost::algorithm::join(entry.second, ", ")
1977  << ". Each configured historical storage path should"
1978  " be on a unique device or filesystem.";
1979
1980  ret = false;
1981  }
1982  }
1983
1984  return ret;
1985
1986 #else
1987  // The requirement that each historical storage path
1988  // corresponds to a distinct device or file system is
1989  // enforced only on Linux, so on other platforms
1990  // keep track of the available capacities for each
1991  // path. Issue a warning if we suspect any of the paths
1992  // may violate this requirement.
1993
1994  // Map byte counts to each path that shares that byte count.
1996  uniqueCapacities(historicalPaths_.size());
1997
1998  for (auto const& path : historicalPaths_)
1999  uniqueCapacities[boost::filesystem::space(path).available].push_back(
2000  path.string());
2001
2002  for (auto const& entry : uniqueCapacities)
2003  {
2004  // Check to see if any paths have the same amount of available bytes.
2005  if (entry.second.size() > 1)
2006  {
2007  // Two or more historical storage paths may
2008  // correspond to the same device or file system.
2009  JLOG(j_.warn())
2010  << "Each of the following paths have " << entry.first
2011  << " bytes free, and may be located on the same device"
2012  " or file system: "
2013  << boost::algorithm::join(entry.second, ", ")
2014  << ". Each configured historical storage path should"
2015  " be on a unique device or file system.";
2016  }
2017  }
2018 #endif
2019
2020  return true;
2021 }
2022 
2023 bool
// DatabaseShardImp::callForLedgerSQLByLedgerSeq(ledgerSeq, callback)
// Maps the ledger sequence to its shard and invokes 'callback' on that
// shard's ledger SQLite session; false when the sequence is out of range.
// NOTE(review): the signature's opening line was dropped by the extraction.
2025  LedgerIndex ledgerSeq,
2026  std::function<bool(soci::session& session)> const& callback)
2027 {
2028  if (ledgerSeq < earliestLedgerSeq_)
2029  {
2030  JLOG(j_.warn()) << "callForLedgerSQLByLedgerSeq ledger seq too early: "
2031  << ledgerSeq;
2032  return false;
2033  }
2034
2035  return callForLedgerSQLByShardIndex(seqToShardIndex(ledgerSeq), callback);
2036 }
2037 
2038 bool
// DatabaseShardImp::callForLedgerSQLByShardIndex(shardIndex, callback)
// Invokes 'callback' on the shard's ledger SQLite session; true only when
// the shard exists, is finalized, and the callback itself returns true.
// NOTE(review): the signature's opening line was dropped by the extraction.
2040  const uint32_t shardIndex,
2041  std::function<bool(soci::session& session)> const& callback)
2042 {
2043  std::lock_guard lock(mutex_);
2044
2045  auto const it{shards_.find(shardIndex)};
2046
     // Short-circuit: only finalized shards expose usable SQLite state.
2047  return it != shards_.end() &&
2048  it->second->getState() == ShardState::finalized &&
2049  it->second->callForLedgerSQL(callback);
2050 }
2051 
2052 bool
// DatabaseShardImp::callForTransactionSQLByLedgerSeq(ledgerSeq, callback)
// Transaction-database counterpart of callForLedgerSQLByLedgerSeq: maps the
// sequence to a shard index and forwards to the by-index overload.
// NOTE(review): the signature opening and the forwarding call's first line
// (callForTransactionSQLByShardIndex) were dropped by the extraction.
2054  LedgerIndex ledgerSeq,
2055  std::function<bool(soci::session& session)> const& callback)
2056 {
2058  seqToShardIndex(ledgerSeq), callback);
2059 }
2060 
2061 bool
// DatabaseShardImp::callForTransactionSQLByShardIndex(shardIndex, callback)
// Invokes 'callback' on the shard's transaction SQLite session; true only
// when the shard exists, is finalized, and the callback returns true.
// NOTE(review): the signature's opening line was dropped by the extraction.
2063  std::uint32_t const shardIndex,
2064  std::function<bool(soci::session& session)> const& callback)
2065 {
2066  std::lock_guard lock(mutex_);
2067
2068  auto const it{shards_.find(shardIndex)};
2069
2070  return it != shards_.end() &&
2071  it->second->getState() == ShardState::finalized &&
2072  it->second->callForTransactionSQL(callback);
2073 }
2074 
2075 bool
// DatabaseShardImp::iterateShardsForward(minShardIndex, visit)
// Visits finalized shards in ascending index order, starting at
// minShardIndex (or the first shard when nullopt). Stops and returns false
// as soon as 'visit' returns false; true when the sweep completes.
// NOTE(review): the signature opening and the iterator declarations were
// dropped by the extraction.
2077  std::optional<std::uint32_t> minShardIndex,
2078  std::function<bool(Shard& shard)> const& visit)
2079 {
2080  std::lock_guard lock(mutex_);
2081
2083
2084  if (!minShardIndex)
2085  it = shards_.begin();
2086  else
2087  it = shards_.lower_bound(*minShardIndex);
2088
2089  eit = shards_.end();
2090
2091  for (; it != eit; it++)
2092  {
     // Only finalized shards are handed to the visitor.
2093  if (it->second->getState() == ShardState::finalized)
2094  {
2095  if (!visit(*it->second))
2096  return false;
2097  }
2098  }
2099
2100  return true;
2101 }
2102 
2103 bool
// DatabaseShardImp::iterateLedgerSQLsForward(minShardIndex, callback)
// Ascending-order sweep over finalized shards, applying 'callback' to each
// shard's ledger SQLite session via iterateShardsForward.
// NOTE(review): the signature's opening line was dropped by the extraction.
2105  std::optional<std::uint32_t> minShardIndex,
2106  std::function<bool(soci::session& session, std::uint32_t shardIndex)> const&
2107  callback)
2108 {
2109  return iterateShardsForward(
2110  minShardIndex, [&callback](Shard& shard) -> bool {
2111  return shard.callForLedgerSQL(callback);
2112  });
2113 }
2114 
2115 bool
// DatabaseShardImp::iterateTransactionSQLsForward(minShardIndex, callback)
// Ascending-order sweep over finalized shards, applying 'callback' to each
// shard's transaction SQLite session via iterateShardsForward.
// NOTE(review): the signature's opening line was dropped by the extraction.
2117  std::optional<std::uint32_t> minShardIndex,
2118  std::function<bool(soci::session& session, std::uint32_t shardIndex)> const&
2119  callback)
2120 {
2121  return iterateShardsForward(
2122  minShardIndex, [&callback](Shard& shard) -> bool {
2123  return shard.callForTransactionSQL(callback);
2124  });
2125 }
2126 
2127 bool
// DatabaseShardImp::iterateShardsBack(maxShardIndex, visit)
// Visits finalized shards in descending index order, starting at
// maxShardIndex (or the last shard when nullopt). Stops and returns false as
// soon as 'visit' returns false; true when the sweep completes.
// NOTE(review): the signature's opening line was dropped by the extraction.
2129  std::optional<std::uint32_t> maxShardIndex,
2130  std::function<bool(Shard& shard)> const& visit)
2131 {
2132  std::lock_guard lock(mutex_);
2133
2134  std::map<std::uint32_t, std::shared_ptr<Shard>>::reverse_iterator it, eit;
2135
2136  if (!maxShardIndex)
2137  it = shards_.rbegin();
2138  else
     // upper_bound + make_reverse_iterator starts at the greatest index
     // that is <= *maxShardIndex.
2139  it = std::make_reverse_iterator(shards_.upper_bound(*maxShardIndex));
2140
2141  eit = shards_.rend();
2142
2143  for (; it != eit; it++)
2144  {
2145  if (it->second->getState() == ShardState::finalized &&
2146  (!maxShardIndex || it->first <= *maxShardIndex))
2147  {
2148  if (!visit(*it->second))
2149  return false;
2150  }
2151  }
2152
2153  return true;
2154 }
2155 
2156 bool
// DatabaseShardImp::iterateLedgerSQLsBack(maxShardIndex, callback)
// Descending-order sweep over finalized shards, applying 'callback' to each
// shard's ledger SQLite session via iterateShardsBack.
// NOTE(review): the signature's opening line was dropped by the extraction.
2158  std::optional<std::uint32_t> maxShardIndex,
2159  std::function<bool(soci::session& session, std::uint32_t shardIndex)> const&
2160  callback)
2161 {
2162  return iterateShardsBack(maxShardIndex, [&callback](Shard& shard) -> bool {
2163  return shard.callForLedgerSQL(callback);
2164  });
2165 }
2166 
2167 bool
// DatabaseShardImp::iterateTransactionSQLsBack(maxShardIndex, callback)
// Descending-order sweep over finalized shards, applying 'callback' to each
// shard's transaction SQLite session via iterateShardsBack.
// NOTE(review): the signature's opening line was dropped by the extraction.
2169  std::optional<std::uint32_t> maxShardIndex,
2170  std::function<bool(soci::session& session, std::uint32_t shardIndex)> const&
2171  callback)
2172 {
2173  return iterateShardsBack(maxShardIndex, [&callback](Shard& shard) -> bool {
2174  return shard.callForTransactionSQL(callback);
2175  });
2176 }
2177 
// std::unique_ptr<ShardInfo> DatabaseShardImp::getShardInfo(lock)
// Builds a ShardInfo snapshot from every stored shard (index, state,
// progress) plus the indexes queued for import.
// NOTE(review): the return type and signature lines were dropped by the
// extraction; the lock parameter is implied by the callers (updatePeers).
2180 {
2181  auto shardInfo{std::make_unique<ShardInfo>()};
2182  for (auto const& [_, shard] : shards_)
2183  {
2184  shardInfo->update(
2185  shard->index(), shard->getState(), shard->getPercentProgress());
2186  }
2187
     // Prepared-but-not-yet-acquired indexes are reported as 'queued'.
2188  for (auto const shardIndex : preparedIndexes_)
2189  shardInfo->update(shardIndex, ShardState::queued, 0);
2190
2191  return shardInfo;
2192 }
2193 
2194 size_t
// DatabaseShardImp::getNumTasks() const
// Returns the number of tasks pending in the background task queue.
// NOTE(review): the signature line was dropped by the extraction.
2196 {
2197  std::lock_guard lock(mutex_);
2198  return taskQueue_.size();
2199 }
2200 
2201 void
// DatabaseShardImp::updatePeers(lock) const
// Broadcasts this node's shard inventory to all peers as a
// mtPEER_SHARD_INFO_V2 message, skipped in standalone mode.
// NOTE(review): the signature and the second half of the condition (per the
// index, an operating-mode check) were dropped by the extraction.
2203 {
2204  if (!app_.config().standalone() &&
2206  {
2207  auto const message{getShardInfo(lock)->makeMessage(app_)};
2208  app_.overlay().foreach(send_always(std::make_shared<Message>(
2209  message, protocol::mtPEER_SHARD_INFO_V2)));
2210  }
2211 }
2212 
2213 void
// DatabaseShardImp::startDatabaseImportThread(lock)
// Launches the NodeStore import on a dedicated thread; on completion the
// thread clears the import status and (per the trailing comment) detaches
// itself so a future import can start a fresh thread.
// NOTE(review): the signature and the databaseImporter_.detach() call were
// dropped by the extraction.
2215 {
2216  // Run the lengthy node store import process in the background
2217  // on a dedicated thread.
2218  databaseImporter_ = std::thread([this] {
2219  doImportDatabase();
2220
2221  std::lock_guard lock(mutex_);
2222
2223  // Make sure to clear this in case the import
2224  // exited early.
2225  databaseImportStatus_.reset();
2226
2227  // Detach the thread so subsequent attempts
2228  // to start the import won't get held up by
2229  // the old thread of execution
2231  });
2232 }
2233 
2234 //------------------------------------------------------------------------------
2235
// std::unique_ptr<DatabaseShard> make_ShardStore(app, scheduler, readThreads, j)
// Factory: returns a DatabaseShardImp when a [shard_db] section is
// configured, otherwise nullptr (the shard store is optional).
// NOTE(review): the return type, function name, and the section lookup's
// second line were dropped by the extraction.
2238  Application& app,
2239  Scheduler& scheduler,
2240  int readThreads,
2241  beast::Journal j)
2242 {
2243  // The shard store is optional. Future changes will require it.
2244  Section const& section{
2246  if (section.empty())
2247  return nullptr;
2248
2249  return std::make_unique<DatabaseShardImp>(app, scheduler, readThreads, j);
2250 }
2251 
2252 } // namespace NodeStore
2253 } // namespace ripple
ripple::SQLiteDatabase
Definition: SQLiteDatabase.h:27
beast::Journal::fatal
Stream fatal() const
Definition: Journal.h:338
ripple::NodeStore::DatabaseShardImp::iterateLedgerSQLsForward
bool iterateLedgerSQLsForward(std::optional< std::uint32_t > minShardIndex, std::function< bool(soci::session &session, std::uint32_t shardIndex)> const &callback) override
iterateLedgerSQLsForward Checks out ledger databases for all shards in ascending order starting from ...
Definition: DatabaseShardImp.cpp:2104
ripple::SizedItem::openFinalLimit
@ openFinalLimit
ripple::Section
Holds a collection of configuration values.
Definition: BasicConfig.h:42
ripple::NodeStore::Database::lastLedgerSeq
std::uint32_t lastLedgerSeq(std::uint32_t shardIndex) const noexcept
Calculates the last ledger sequence for a given shard index.
Definition: Database.h:271
ripple::Application
Definition: Application.h:116
ripple::hotUNKNOWN
@ hotUNKNOWN
Definition: NodeObject.h:33
std::this_thread::sleep_for
T sleep_for(T... args)
ripple::NodeStore::make_ShardStore
std::unique_ptr< DatabaseShard > make_ShardStore(Application &app, Scheduler &scheduler, int readThreads, beast::Journal j)
Definition: DatabaseShardImp.cpp:2237
ripple::makeSlice
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:241
ripple::NodeStore::DatabaseShardImp::mutex_
std::mutex mutex_
Definition: DatabaseShardImp.h:233
ripple::NodeStore::DatabaseShardImp::app_
Application & app_
Definition: DatabaseShardImp.h:232
ripple::ShardState::complete
@ complete
ripple::DEFAULT_LEDGERS_PER_SHARD
static constexpr std::uint32_t DEFAULT_LEDGERS_PER_SHARD
The number of ledgers in a shard.
Definition: SystemParameters.h:76
ripple::NodeStore::DatabaseShardImp::storeLedger
bool storeLedger(std::shared_ptr< Ledger const > const &srcLedger) override
Store a ledger from a different database.
Definition: DatabaseShardImp.cpp:1137
ripple::NodeStore::Database
Persistency layer for NodeObject.
Definition: Database.h:51
std::string
STL class.
std::shared_ptr< Ledger >
ripple::loadByIndex
std::shared_ptr< Ledger > loadByIndex(std::uint32_t ledgerIndex, Application &app, bool acquire)
Definition: Ledger.cpp:1123
ripple::SizedItem
SizedItem
Definition: Config.h:51
ripple::NodeStore::DatabaseShardImp::shards_
std::map< std::uint32_t, std::shared_ptr< Shard > > shards_
Definition: DatabaseShardImp.h:243
std::exception
STL class.
std::stoul
T stoul(T... args)
ripple::NodeStore::DatabaseShardImp::PathDesignation
PathDesignation
Definition: DatabaseShardImp.h:196
beast::Journal::trace
Stream trace() const
Severity stream access functions.
Definition: Journal.h:308
ripple::Serializer::modData
Blob & modData()
Definition: Serializer.h:179
ripple::NodeStore::DatabaseShardImp::callForLedgerSQLByLedgerSeq
bool callForLedgerSQLByLedgerSeq(LedgerIndex ledgerSeq, std::function< bool(soci::session &session)> const &callback) override
Invoke a callback on the SQLite db holding the corresponding ledger.
Definition: DatabaseShardImp.cpp:2024
ripple::NodeStore::TaskQueue::size
size_t size() const
Return the queue size.
Definition: TaskQueue.cpp:48
std::vector::reserve
T reserve(T... args)
ripple::NodeStore::DatabaseShardImp::removePreShard
void removePreShard(std::uint32_t shardIndex) override
Remove a previously prepared shard index for import.
Definition: DatabaseShardImp.cpp:417
ripple::LedgerMaster::getValidLedgerIndex
LedgerIndex getValidLedgerIndex()
Definition: LedgerMaster.cpp:213
ripple::NodeStore::Database::fdRequired_
int fdRequired_
Definition: Database.h:303
ripple::NodeStore::DatabaseShardImp::fileSz_
std::uint64_t fileSz_
Definition: DatabaseShardImp.h:267
ripple::InboundLedger::Reason::GENERIC
@ GENERIC
std::vector
STL class.
std::set::find
T find(T... args)
ripple::ConfigSection::shardDatabase
static std::string shardDatabase()
Definition: ConfigSections.h:38
ripple::NodeStore::Shard::callForLedgerSQL
bool callForLedgerSQL(std::function< bool(Args... args)> const &callback)
Invoke a callback on the ledger SQLite db.
Definition: nodestore/impl/Shard.h:228
ripple::NodeStore::DatabaseShardImp::stop
void stop() override
Definition: DatabaseShardImp.cpp:700
std::vector::size
T size(T... args)
ripple::NodeObjectType
NodeObjectType
The types of node objects.
Definition: NodeObject.h:32
ripple::NodeObject::createObject
static std::shared_ptr< NodeObject > createObject(NodeObjectType type, Blob &&data, uint256 const &hash)
Create an object from fields.
Definition: NodeObject.cpp:37
std::chrono::milliseconds
ripple::NodeStore::DatabaseShardImp::taskQueue_
TaskQueue taskQueue_
Definition: DatabaseShardImp.h:240
ripple::NodeStore::DatabaseShardImp::setStored
void setStored(std::shared_ptr< Ledger const > const &ledger) override
Notifies the database that the given ledger has been fully acquired and stored.
Definition: DatabaseShardImp.cpp:631
std::set::emplace
T emplace(T... args)
beast::Journal::warn
Stream warn() const
Definition: Journal.h:326
std::lock_guard
STL class.
ripple::kilobytes
constexpr auto kilobytes(T value) noexcept
Definition: ByteUtilities.h:27
ripple::NetworkOPs::getOperatingMode
virtual OperatingMode getOperatingMode() const =0
ripple::NodeStore::Database::stop
virtual void stop()
Definition: Database.cpp:165
ripple::NodeStore::FetchReport
Contains information about a fetch operation.
Definition: ripple/nodestore/Scheduler.h:32
ripple::NodeStore::DatabaseShardImp::getDatabaseImportSequence
std::optional< std::uint32_t > getDatabaseImportSequence() const override
Returns the first ledger sequence of the shard currently being imported from the NodeStore.
Definition: DatabaseShardImp.cpp:1298
std::function
std::all_of
T all_of(T... args)
ripple::NodeStore::Shard::finalKey
static const uint256 finalKey
Definition: nodestore/impl/Shard.h:251
std::atomic_bool::compare_exchange_strong
T compare_exchange_strong(T... args)
ripple::LedgerMaster::walkHashBySeq
std::optional< LedgerHash > walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason)
Walk to a ledger's hash using the skip list.
Definition: LedgerMaster.cpp:1759
ripple::NodeStore::DatabaseShardImp::importDatabase
void importDatabase(Database &source) override
Import the application local node store.
Definition: DatabaseShardImp.cpp:766
ripple::NodeStore::DatabaseShardImp::databaseImporter_
std::thread databaseImporter_
Definition: DatabaseShardImp.h:292
ripple::NodeStore::DatabaseShardImp::openFinalLimit_
const std::uint32_t openFinalLimit_
Definition: DatabaseShardImp.h:273
std::sort
T sort(T... args)
std::shared_ptr::reset
T reset(T... args)
ripple::SHAMapHash
Definition: SHAMapHash.h:32
ripple::NodeStore::DatabaseShardImp::iterateShardsForward
bool iterateShardsForward(std::optional< std::uint32_t > minShardIndex, std::function< bool(Shard &shard)> const &visit)
iterateShardsForward Visits all shards starting from given in ascending order and calls given callbac...
Definition: DatabaseShardImp.cpp:2076
ripple::Application::getOPs
virtual NetworkOPs & getOPs()=0
ripple::NodeStore::DatabaseShardImp::sweep
void sweep() override
Remove expired entries from the positive and negative caches.
Definition: DatabaseShardImp.cpp:1172
ripple::NodeStore::DatabaseShardImp::stopNodeToShard
Json::Value stopNodeToShard() override
Terminates a NodeStore to ShardStore import and returns the result in a JSON object.
Definition: DatabaseShardImp.cpp:1276
ripple::Section::values
std::vector< std::string > const & values() const
Returns all the values in the section.
Definition: BasicConfig.h:77
std::thread::detach
T detach(T... args)
ripple::send_always
Sends a message to all peers.
Definition: predicates.h:31
ripple::NodeStore::DatabaseShardImp::PathDesignation::historical
@ historical
ripple::get_if_exists
bool get_if_exists(Section const &section, std::string const &name, T &v)
Definition: BasicConfig.h:384
ripple::NodeStore::Shard::version
static constexpr std::uint32_t version
Definition: nodestore/impl/Shard.h:246
ripple::NodeStore::DatabaseShardImp::getDatabaseImportStatus
Json::Value getDatabaseImportStatus() const override
Returns a JSON object detailing the status of an ongoing database import if one is running,...
Definition: DatabaseShardImp.cpp:1224
std::vector::push_back
T push_back(T... args)
ripple::NodeStore::DatabaseShardImp::secondLatestShardIndex_
std::optional< std::uint32_t > secondLatestShardIndex_
Definition: DatabaseShardImp.h:286
ripple::NodeStore::DatabaseShardImp::avgShardFileSz_
std::uint64_t avgShardFileSz_
Definition: DatabaseShardImp.h:270
ripple::NodeStore::DatabaseShardImp::callForTransactionSQLByLedgerSeq
bool callForTransactionSQLByLedgerSeq(LedgerIndex ledgerSeq, std::function< bool(soci::session &session)> const &callback) override
Invoke a callback on the transaction SQLite db for the corresponding ledger.
Definition: DatabaseShardImp.cpp:2053
ripple::base_uint< 256 >
ripple::NodeStore::DatabaseShardImp::updatePeers
void updatePeers(std::lock_guard< std::mutex > const &lock) const
Definition: DatabaseShardImp.cpp:2202
std::sample
T sample(T... args)
ripple::NodeStore::DatabaseShardImp::getPreShards
std::string getPreShards() override
Get shard indexes being imported.
Definition: DatabaseShardImp.cpp:427
ripple::NodeStore::DatabaseShardImp::databaseImportStatus_
std::unique_ptr< DatabaseImportStatus > databaseImportStatus_
Definition: DatabaseShardImp.h:289
ripple::NodeStore::DatabaseShardImp::getWriteLoad
std::int32_t getWriteLoad() const override
Retrieve the estimated number of pending write operations.
Definition: DatabaseShardImp.cpp:1086
std::thread::joinable
T joinable(T... args)
ripple::NodeStore::DatabaseShardImp::findAcquireIndex
std::optional< std::uint32_t > findAcquireIndex(std::uint32_t validLedgerSeq, std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:1403
ripple::NodeStore::Database::firstLedgerSeq
std::uint32_t firstLedgerSeq(std::uint32_t shardIndex) const noexcept
Calculates the first ledger sequence for a given shard index.
Definition: Database.h:257
ripple::Config::reporting
bool reporting() const
Definition: Config.h:349
ripple::OperatingMode::DISCONNECTED
@ DISCONNECTED
not ready to process requests
ripple::NodeStore::DatabaseShardImp::iterateTransactionSQLsBack
bool iterateTransactionSQLsBack(std::optional< std::uint32_t > maxShardIndex, std::function< bool(soci::session &session, std::uint32_t shardIndex)> const &callback) override
iterateTransactionSQLsBack Checks out transaction databases for all shards in descending order starti...
Definition: DatabaseShardImp.cpp:2168
ripple::NodeStore::DatabaseShardImp::haltDatabaseImport_
std::atomic_bool haltDatabaseImport_
Definition: DatabaseShardImp.h:295
ripple::NodeStore::DatabaseShardImp::chooseHistoricalPath
boost::filesystem::path chooseHistoricalPath(std::lock_guard< std::mutex > const &) const
Definition: DatabaseShardImp.cpp:1910
ripple::rand_int
std::enable_if_t< std::is_integral< Integral >::value &&detail::is_engine< Engine >::value, Integral > rand_int(Engine &engine, Integral min, Integral max)
Return a uniformly distributed random integer.
Definition: ripple/basics/random.h:115
ripple::NodeStore::DatabaseShardImp::sufficientStorage
bool sufficientStorage(std::uint32_t numShards, PathDesignation pathDesignation, std::lock_guard< std::mutex > const &) const
Definition: DatabaseShardImp.cpp:1603
ripple::NodeStore::DatabaseShardImp::fetchNodeObject
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq, FetchReport &fetchReport, bool duplicate) override
Definition: DatabaseShardImp.cpp:1383
ripple::NodeStore::DatabaseShardImp::init_
bool init_
Definition: DatabaseShardImp.h:234
std::thread
STL class.
Json::objectValue
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:43
ripple::Application::getLedgerMaster
virtual LedgerMaster & getLedgerMaster()=0
ripple::NodeStore::TaskQueue::addTask
void addTask(std::function< void()> task)
Adds a task to the queue.
Definition: TaskQueue.cpp:38
ripple::NodeStore::DatabaseShardImp::callForLedgerSQLByShardIndex
bool callForLedgerSQLByShardIndex(std::uint32_t const shardIndex, std::function< bool(soci::session &session)> const &callback) override
Invoke a callback on the ledger SQLite db for the corresponding shard.
Definition: DatabaseShardImp.cpp:2039
ripple::Config
Definition: Config.h:92
ripple::NodeStore::DatabaseShardImp::doImportDatabase
void doImportDatabase()
Definition: DatabaseShardImp.cpp:785
std::ofstream
STL class.
ripple::Application::config
virtual Config & config()=0
ripple::NodeStore::DatabaseShardImp::dir_
boost::filesystem::path dir_
Definition: DatabaseShardImp.h:252
ripple::Config::standalone
bool standalone() const
Definition: Config.h:344
std::unique_lock
STL class.
ripple::Application::getRelationalDatabase
virtual RelationalDatabase & getRelationalDatabase()=0
ripple::NodeStore::DatabaseShardImp::removeFailedShard
void removeFailedShard(std::shared_ptr< Shard > &shard)
Definition: DatabaseShardImp.cpp:1691
ripple::NodeStore::DatabaseShard
A collection of historical shards.
Definition: DatabaseShard.h:37
std::to_string
T to_string(T... args)
ripple::NodeStore::DatabaseShardImp::getNumTasks
size_t getNumTasks() const override
Returns the number of queued tasks.
Definition: DatabaseShardImp.cpp:2195
ripple::NodeStore::DatabaseShardImp::importShard
bool importShard(std::uint32_t shardIndex, boost::filesystem::path const &srcDir) override
Import a shard from the shard archive handler into the shard database.
Definition: DatabaseShardImp.cpp:445
ripple::default_prng
beast::xor_shift_engine & default_prng()
Return the default random engine.
Definition: ripple/basics/random.h:65
ripple::NodeStore::DatabaseShardImp::store
void store(NodeObjectType type, Blob &&data, uint256 const &hash, std::uint32_t ledgerSeq) override
Store the object.
Definition: DatabaseShardImp.cpp:1103
ripple::NodeStore::TaskQueue::stop
void stop()
Definition: TaskQueue.cpp:32
ripple::NodeStore::DatabaseShardImp::PathDesignation::none
@ none
beast::Journal::error
Stream error() const
Definition: Journal.h:332
ripple::ShardState::finalized
@ finalized
std::set::erase
T erase(T... args)
ripple::NodeStore::DatabaseShardImp::initConfig
bool initConfig(std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:1309
ripple::ConfigSection
Definition: ConfigSections.h:28
ripple::NodeStore::DatabaseShardImp::latestShardIndex_
std::optional< std::uint32_t > latestShardIndex_
Definition: DatabaseShardImp.h:285
beast::Journal
A generic endpoint for log messages.
Definition: Journal.h:58
std::uint32_t
ripple::NodeStore::DatabaseShardImp::acquireIndex_
std::uint32_t acquireIndex_
Definition: DatabaseShardImp.h:249
ripple::Overlay::foreach
void foreach(Function f) const
Visit every active peer.
Definition: Overlay.h:198
std::map
STL class.
ripple::NodeStore::Scheduler
Scheduling for asynchronous backend activity.
Definition: ripple/nodestore/Scheduler.h:60
std::transform
T transform(T... args)
ripple::NodeStore::Database::storeStats
void storeStats(std::uint64_t count, std::uint64_t sz)
Definition: Database.h:333
ripple::NodeStore::DatabaseShardImp::startNodeToShard
Json::Value startNodeToShard() override
Initiates a NodeStore to ShardStore import and returns the result in a JSON object.
Definition: DatabaseShardImp.cpp:1253
ripple::NodeStore::DatabaseShardImp::preparedIndexes_
std::set< std::uint32_t > preparedIndexes_
Definition: DatabaseShardImp.h:246
ripple::NodeStore::DatabaseShardImp::init
bool init() override
Initialize the database.
Definition: DatabaseShardImp.cpp:71
std::weak_ptr
STL class.
ripple::NodeStore::Database::isStopping
bool isStopping() const
Definition: Database.cpp:146
ripple::rpcINTERNAL
@ rpcINTERNAL
Definition: ErrorCodes.h:130
ripple::Serializer
Definition: Serializer.h:40
ripple::NodeStore::DatabaseShardImp::historicalPaths_
std::vector< boost::filesystem::path > historicalPaths_
Definition: DatabaseShardImp.h:264
std::vector::emplace_back
T emplace_back(T... args)
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::Serializer::addBitString
int addBitString(base_uint< Bits, Tag > const &v)
Definition: Serializer.h:98
ripple::Application::getNodeStore
virtual NodeStore::Database & getNodeStore()=0
ripple::NodeStore::DatabaseShardImp::checkHistoricalPaths
bool checkHistoricalPaths(std::lock_guard< std::mutex > const &) const
Definition: DatabaseShardImp.cpp:1943
ripple::NodeStore::DatabaseShardImp::maxHistoricalShards_
std::uint32_t maxHistoricalShards_
Definition: DatabaseShardImp.h:261
ripple::NodeStore::Shard::callForTransactionSQL
bool callForTransactionSQL(std::function< bool(Args... args)> const &callback)
Invoke a callback on the transaction SQLite db.
Definition: nodestore/impl/Shard.h:240
ripple::ShardState::acquire
@ acquire
ripple::Application::getShardFamily
virtual Family * getShardFamily()=0
ripple::NodeStore::Database::j_
const beast::Journal j_
Definition: Database.h:301
ripple::NodeStore::DatabaseShardImp::callForTransactionSQLByShardIndex
bool callForTransactionSQLByShardIndex(std::uint32_t const shardIndex, std::function< bool(soci::session &session)> const &callback) override
Invoke a callback on the transaction SQLite db for the corresponding shard.
Definition: DatabaseShardImp.cpp:2062
ripple::NodeStore::DatabaseShardImp::fetchLedger
std::shared_ptr< Ledger > fetchLedger(uint256 const &hash, std::uint32_t ledgerSeq) override
Fetch a ledger from the shard store.
Definition: DatabaseShardImp.cpp:553
std::vector::begin
T begin(T... args)
ripple::NodeStore::Database::seqToShardIndex
std::uint32_t seqToShardIndex(std::uint32_t ledgerSeq) const noexcept
Calculates the shard index for a given ledger sequence.
Definition: Database.h:283
std
STL namespace.
ripple::XRP_LEDGER_EARLIEST_SEQ
static constexpr std::uint32_t XRP_LEDGER_EARLIEST_SEQ
The XRP ledger network's earliest allowed sequence.
Definition: SystemParameters.h:69
ripple::deserializePrefixedHeader
LedgerHeader deserializePrefixedHeader(Slice data, bool hasHash)
Deserialize a ledger header (prefixed with 4 bytes) from a byte array.
Definition: protocol/impl/LedgerHeader.cpp:66
ripple::NodeStore::DatabaseShardImp::iterateShardsBack
bool iterateShardsBack(std::optional< std::uint32_t > maxShardIndex, std::function< bool(Shard &shard)> const &visit)
iterateShardsBack Visits all shards starting from given in descending order and calls given callback ...
Definition: DatabaseShardImp.cpp:2128
ripple::NodeStore::DatabaseShardImp::numHistoricalShards
std::uint32_t numHistoricalShards(std::lock_guard< std::mutex > const &lock) const
Definition: DatabaseShardImp.cpp:1731
ripple::LedgerMaster::getCurrentLedgerIndex
LedgerIndex getCurrentLedgerIndex()
Definition: LedgerMaster.cpp:207
ripple::NodeStore::DatabaseShardImp::relocateOutdatedShards
void relocateOutdatedShards(std::lock_guard< std::mutex > const &lock)
Definition: DatabaseShardImp.cpp:1742
ripple::NodeStore::DatabaseShardImp::iterateLedgerSQLsBack
bool iterateLedgerSQLsBack(std::optional< std::uint32_t > maxShardIndex, std::function< bool(soci::session &session, std::uint32_t shardIndex)> const &callback) override
iterateLedgerSQLsBack Checks out ledger databases for all shards in descending order starting from gi...
Definition: DatabaseShardImp.cpp:2157
ripple::NodeStore::DatabaseShardImp::updateFileStats
void updateFileStats()
Definition: DatabaseShardImp.cpp:1542
ripple::NodeStore::Database::earliestLedgerSeq_
const std::uint32_t earliestLedgerSeq_
Definition: Database.h:322
ripple::Application::overlay
virtual Overlay & overlay()=0
ripple::NodeStore::DatabaseShardImp::shardBoundaryIndex
std::uint32_t shardBoundaryIndex() const
Definition: DatabaseShardImp.cpp:1715
std::count_if
T count_if(T... args)
std::vector::empty
T empty(T... args)
ripple::NodeStore::DatabaseShardImp::prepareShards
bool prepareShards(std::vector< std::uint32_t > const &shardIndexes) override
Prepare one or more shard indexes to be imported into the database.
Definition: DatabaseShardImp.cpp:300
std::unique
T unique(T... args)
std::optional< std::uint32_t >
beast::Journal::debug
Stream debug() const
Definition: Journal.h:314
ripple::NodeStore::Database::earliestShardIndex_
const std::uint32_t earliestShardIndex_
Definition: Database.h:325
ripple::to_string
std::string to_string(Manifest const &m)
Format the specified manifest to a string for debugging purposes.
Definition: app/misc/impl/Manifest.cpp:41
ripple::NodeStore::DatabaseShardImp::startDatabaseImportThread
void startDatabaseImportThread(std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:2214
ripple::NodeStore::DatabaseShardImp::setStoredInShard
bool setStoredInShard(std::shared_ptr< Shard > &shard, std::shared_ptr< Ledger const > const &ledger)
Definition: DatabaseShardImp.cpp:1658
ripple::NodeStore::DatabaseShardImp::canAdd_
bool canAdd_
Definition: DatabaseShardImp.h:255
ripple::Serializer::add32
int add32(std::uint32_t i)
Definition: Serializer.cpp:38
std::vector::end
T end(T... args)
ripple::NodeStore::Database::fetchNodeObject
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq=0, FetchType fetchType=FetchType::synchronous, bool duplicate=false)
Fetch a node object.
Definition: Database.cpp:252
ripple::NodeStore::Database::scheduler_
Scheduler & scheduler_
Definition: Database.h:302
ripple::RangeSet
boost::icl::interval_set< T, std::less, ClosedInterval< T > > RangeSet
A set of closed intervals over the domain T.
Definition: RangeSet.h:70
ripple::NodeStore::Database::earliestLedgerSeq
std::uint32_t earliestLedgerSeq() const noexcept
Definition: Database.h:238
ripple::NodeStore::DatabaseShardImp::finalizeShard
void finalizeShard(std::shared_ptr< Shard > &shard, bool writeSQLite, std::optional< uint256 > const &expectedHash)
Definition: DatabaseShardImp.cpp:1468
std::max
T max(T... args)
ripple::NodeStore::Shard
Definition: nodestore/impl/Shard.h:55
ripple::NodeStore::Database::maxLedgers
std::uint32_t maxLedgers(std::uint32_t shardIndex) const noexcept
Calculates the maximum ledgers for a given shard index.
Definition: Database.cpp:152
ripple::NodeStore::DatabaseShardImp::DatabaseShardImp
DatabaseShardImp()=delete
std::make_reverse_iterator
T make_reverse_iterator(T... args)
std::unique_ptr
STL class.
ripple::loadLedgerHelper
std::shared_ptr< Ledger > loadLedgerHelper(LedgerInfo const &info, Application &app, bool acquire)
Definition: Ledger.cpp:1076
ripple::NodeStore::DatabaseShardImp::databaseImportMarker_
static constexpr auto databaseImportMarker_
Definition: DatabaseShardImp.h:276
std::unordered_map
STL class.
ripple::RelationalDatabase::getHashesByIndex
virtual std::optional< LedgerHashPair > getHashesByIndex(LedgerIndex ledgerIndex)=0
getHashesByIndex Returns the hashes of the ledger and its parent as specified by the ledgerIndex.
ripple::RPC::make_error
Json::Value make_error(error_code_i code)
Returns a new json object that reflects the error code.
Definition: ErrorCodes.cpp:180
ripple::PublisherStatus::available
@ available
ripple::NodeStore::DatabaseShardImp::prepareForNewShard
std::optional< PathDesignation > prepareForNewShard(std::uint32_t shardIndex, std::uint32_t numHistoricalShards, std::lock_guard< std::mutex > const &lock)
Definition: DatabaseShardImp.cpp:1879
ripple::ConfigSection::nodeDatabase
static std::string nodeDatabase()
Definition: ConfigSections.h:33
std::thread::join
T join(T... args)
std::exception::what
T what(T... args)
ripple::ShardState::queued
@ queued
ripple::NodeStore::DatabaseShardImp::iterateTransactionSQLsForward
bool iterateTransactionSQLsForward(std::optional< std::uint32_t > minShardIndex, std::function< bool(soci::session &session, std::uint32_t shardIndex)> const &callback) override
iterateTransactionSQLsForward Checks out transaction databases for all shards in ascending order star...
Definition: DatabaseShardImp.cpp:2116
ripple::HashPrefix::shardInfo
@ shardInfo
shard info for signing
Json::Value
Represents a JSON value.
Definition: json_value.h:145
ripple::NodeStore::DatabaseShardImp::prepareLedger
std::optional< std::uint32_t > prepareLedger(std::uint32_t validLedgerSeq) override
Prepare to store a new ledger in the shard being acquired.
Definition: DatabaseShardImp.cpp:230
ripple::get
T & get(EitherAmount &amt)
Definition: AmountSpec.h:118
ripple::NodeStore::DatabaseShardImp::ctx_
std::unique_ptr< nudb::context > ctx_
Definition: DatabaseShardImp.h:237
ripple::BasicConfig::section
Section & section(std::string const &name)
Returns the section with the given name.
Definition: BasicConfig.cpp:127
ripple::NodeStore::DatabaseShardImp::backendName_
std::string backendName_
Definition: DatabaseShardImp.h:258
ripple::NodeStore::DatabaseShardImp::getShardInfo
std::unique_ptr< ShardInfo > getShardInfo() const override
Query information about shards held.
Definition: DatabaseShardImp.cpp:693