rippled
DatabaseShardImp.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2017 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/app/ledger/InboundLedgers.h>
21 #include <ripple/app/ledger/LedgerMaster.h>
22 #include <ripple/app/misc/NetworkOPs.h>
23 #include <ripple/app/rdb/backend/RelationalDBInterfaceSqlite.h>
24 #include <ripple/basics/ByteUtilities.h>
25 #include <ripple/basics/RangeSet.h>
26 #include <ripple/basics/chrono.h>
27 #include <ripple/basics/random.h>
28 #include <ripple/core/ConfigSections.h>
29 #include <ripple/nodestore/DummyScheduler.h>
30 #include <ripple/nodestore/impl/DatabaseShardImp.h>
31 #include <ripple/overlay/Overlay.h>
32 #include <ripple/overlay/predicates.h>
33 #include <ripple/protocol/HashPrefix.h>
34 #include <ripple/protocol/digest.h>
35 
36 #include <boost/algorithm/string/predicate.hpp>
37 
38 #if BOOST_OS_LINUX
39 #include <sys/statvfs.h>
40 #endif
41 
42 namespace ripple {
43 
44 namespace NodeStore {
45 
47  Application& app,
48  Scheduler& scheduler,
49  int readThreads,
51  : DatabaseShard(
52  scheduler,
53  readThreads,
54  app.config().section(ConfigSection::shardDatabase()),
55  j)
56  , app_(app)
57  , avgShardFileSz_(ledgersPerShard_ * kilobytes(192ull))
58  , openFinalLimit_(
59  app.config().getValueFor(SizedItem::openFinalLimit, std::nullopt))
60 {
61  if (app.config().reporting())
62  {
63  Throw<std::runtime_error>(
64  "Attempted to create DatabaseShardImp in reporting mode. Reporting "
65  "does not support shards. Remove shards info from config");
66  }
67 }
68 
69 bool
71 {
72  {
73  std::lock_guard lock(mutex_);
74  if (init_)
75  {
76  JLOG(j_.error()) << "already initialized";
77  return false;
78  }
79 
80  if (!initConfig(lock))
81  {
82  JLOG(j_.error()) << "invalid configuration file settings";
83  return false;
84  }
85 
86  try
87  {
88  using namespace boost::filesystem;
89 
90  // Consolidate the main storage path and all historical paths
91  std::vector<path> paths{dir_};
92  paths.insert(
93  paths.end(), historicalPaths_.begin(), historicalPaths_.end());
94 
95  for (auto const& path : paths)
96  {
97  if (exists(path))
98  {
99  if (!is_directory(path))
100  {
101  JLOG(j_.error()) << path << " must be a directory";
102  return false;
103  }
104  }
105  else if (!create_directories(path))
106  {
107  JLOG(j_.error())
108  << "failed to create path: " + path.string();
109  return false;
110  }
111  }
112 
114  {
115  // Check historical paths for duplicated file systems
116  if (!checkHistoricalPaths(lock))
117  return false;
118  }
119 
120  ctx_ = std::make_unique<nudb::context>();
121  ctx_->start();
122 
123  // Find shards
124  std::uint32_t openFinals{0};
125  for (auto const& path : paths)
126  {
127  for (auto const& it : directory_iterator(path))
128  {
129  // Ignore files
130  if (!is_directory(it))
131  continue;
132 
133  // Ignore nonnumerical directory names
134  auto const shardDir{it.path()};
135  auto dirName{shardDir.stem().string()};
136  if (!std::all_of(
137  dirName.begin(), dirName.end(), [](auto c) {
138  return ::isdigit(static_cast<unsigned char>(c));
139  }))
140  {
141  continue;
142  }
143 
144  // Ignore values below the earliest shard index
145  auto const shardIndex{std::stoul(dirName)};
146  if (shardIndex < earliestShardIndex_)
147  {
148  JLOG(j_.debug())
149  << "shard " << shardIndex
150  << " ignored, comes before earliest shard index "
152  continue;
153  }
154 
155  // Check if a previous database import failed
156  if (is_regular_file(shardDir / databaseImportMarker_))
157  {
158  JLOG(j_.warn())
159  << "shard " << shardIndex
160  << " previously failed database import, removing";
161  remove_all(shardDir);
162  continue;
163  }
164 
165  auto shard{std::make_shared<Shard>(
166  app_, *this, shardIndex, shardDir.parent_path(), j_)};
167  if (!shard->init(scheduler_, *ctx_))
168  {
169  // Remove corrupted or legacy shard
170  shard->removeOnDestroy();
171  JLOG(j_.warn())
172  << "shard " << shardIndex << " removed, "
173  << (shard->isLegacy() ? "legacy" : "corrupted")
174  << " shard";
175  continue;
176  }
177 
178  switch (shard->getState())
179  {
181  if (++openFinals > openFinalLimit_)
182  shard->tryClose();
183  shards_.emplace(shardIndex, std::move(shard));
184  break;
185 
188  shards_.emplace(shardIndex, std::move(shard))
189  .first->second,
190  true,
191  std::nullopt);
192  break;
193 
194  case ShardState::acquire:
195  if (acquireIndex_ != 0)
196  {
197  JLOG(j_.error())
198  << "more than one shard being acquired";
199  return false;
200  }
201 
202  shards_.emplace(shardIndex, std::move(shard));
203  acquireIndex_ = shardIndex;
204  break;
205 
206  default:
207  JLOG(j_.error())
208  << "shard " << shardIndex << " invalid state";
209  return false;
210  }
211  }
212  }
213  }
214  catch (std::exception const& e)
215  {
216  JLOG(j_.fatal()) << "Exception caught in function " << __func__
217  << ". Error: " << e.what();
218  return false;
219  }
220 
221  init_ = true;
222  }
223 
224  updateFileStats();
225  return true;
226 }
227 
230 {
231  std::optional<std::uint32_t> shardIndex;
232 
233  {
234  std::lock_guard lock(mutex_);
235  assert(init_);
236 
237  if (acquireIndex_ != 0)
238  {
239  if (auto const it{shards_.find(acquireIndex_)}; it != shards_.end())
240  return it->second->prepare();
241 
242  // Should never get here
243  assert(false);
244  return std::nullopt;
245  }
246 
247  if (!canAdd_)
248  return std::nullopt;
249 
250  shardIndex = findAcquireIndex(validLedgerSeq, lock);
251  }
252 
253  if (!shardIndex)
254  {
255  JLOG(j_.debug()) << "no new shards to add";
256  {
257  std::lock_guard lock(mutex_);
258  canAdd_ = false;
259  }
260  return std::nullopt;
261  }
262 
263  auto const pathDesignation = [this, shardIndex = *shardIndex]() {
264  std::lock_guard lock(mutex_);
265  return prepareForNewShard(shardIndex, numHistoricalShards(lock), lock);
266  }();
267 
268  if (!pathDesignation)
269  return std::nullopt;
270 
271  auto const needsHistoricalPath =
272  *pathDesignation == PathDesignation::historical;
273 
274  auto shard = [this, shardIndex, needsHistoricalPath] {
275  std::lock_guard lock(mutex_);
276  return std::make_unique<Shard>(
277  app_,
278  *this,
279  *shardIndex,
280  (needsHistoricalPath ? chooseHistoricalPath(lock) : ""),
281  j_);
282  }();
283 
284  if (!shard->init(scheduler_, *ctx_))
285  return std::nullopt;
286 
287  auto const ledgerSeq{shard->prepare()};
288  {
289  std::lock_guard lock(mutex_);
290  shards_.emplace(*shardIndex, std::move(shard));
291  acquireIndex_ = *shardIndex;
292  updatePeers(lock);
293  }
294 
295  return ledgerSeq;
296 }
297 
298 bool
300 {
301  auto fail = [j = j_, &shardIndexes](
302  std::string const& msg,
303  std::optional<std::uint32_t> shardIndex = std::nullopt) {
304  auto multipleIndexPrequel = [&shardIndexes] {
305  std::vector<std::string> indexesAsString(shardIndexes.size());
307  shardIndexes.begin(),
308  shardIndexes.end(),
309  indexesAsString.begin(),
310  [](uint32_t const index) { return std::to_string(index); });
311 
312  return std::string("shard") +
313  (shardIndexes.size() > 1 ? "s " : " ") +
314  boost::algorithm::join(indexesAsString, ", ");
315  };
316 
317  JLOG(j.error()) << (shardIndex ? "shard " + std::to_string(*shardIndex)
318  : multipleIndexPrequel())
319  << " " << msg;
320  return false;
321  };
322 
323  if (shardIndexes.empty())
324  return fail("invalid shard indexes");
325 
326  std::lock_guard lock(mutex_);
327  assert(init_);
328 
329  if (!canAdd_)
330  return fail("cannot be stored at this time");
331 
332  auto historicalShardsToPrepare = 0;
333 
334  for (auto const shardIndex : shardIndexes)
335  {
336  if (shardIndex < earliestShardIndex_)
337  {
338  return fail(
339  "comes before earliest shard index " +
341  shardIndex);
342  }
343 
344  // If we are synced to the network, check if the shard index is
345  // greater or equal to the current or validated shard index.
346  auto seqCheck = [&](std::uint32_t ledgerSeq) {
347  if (ledgerSeq >= earliestLedgerSeq_ &&
348  shardIndex >= seqToShardIndex(ledgerSeq))
349  {
350  return fail("invalid index", shardIndex);
351  }
352  return true;
353  };
354  if (!seqCheck(app_.getLedgerMaster().getValidLedgerIndex() + 1) ||
356  {
357  return fail("invalid index", shardIndex);
358  }
359 
360  if (shards_.find(shardIndex) != shards_.end())
361  return fail("is already stored", shardIndex);
362 
363  if (preparedIndexes_.find(shardIndex) != preparedIndexes_.end())
364  return fail(
365  "is already queued for import from the shard archive handler",
366  shardIndex);
367 
369  {
370  if (auto shard = databaseImportStatus_->currentShard.lock(); shard)
371  {
372  if (shard->index() == shardIndex)
373  return fail(
374  "is being imported from the nodestore", shardIndex);
375  }
376  }
377 
378  // Any shard earlier than the two most recent shards
379  // is a historical shard
380  if (shardIndex < shardBoundaryIndex())
381  ++historicalShardsToPrepare;
382  }
383 
384  auto const numHistShards = numHistoricalShards(lock);
385 
386  // Check shard count and available storage space
387  if (numHistShards + historicalShardsToPrepare > maxHistoricalShards_)
388  return fail("maximum number of historical shards reached");
389 
390  if (historicalShardsToPrepare)
391  {
392  // Check available storage space for historical shards
393  if (!sufficientStorage(
394  historicalShardsToPrepare, PathDesignation::historical, lock))
395  return fail("insufficient storage space available");
396  }
397 
398  if (auto const recentShardsToPrepare =
399  shardIndexes.size() - historicalShardsToPrepare;
400  recentShardsToPrepare)
401  {
402  // Check available storage space for recent shards
403  if (!sufficientStorage(
404  recentShardsToPrepare, PathDesignation::none, lock))
405  return fail("insufficient storage space available");
406  }
407 
408  for (auto const shardIndex : shardIndexes)
409  preparedIndexes_.emplace(shardIndex);
410 
411  updatePeers(lock);
412  return true;
413 }
414 
415 void
417 {
418  std::lock_guard lock(mutex_);
419  assert(init_);
420 
421  if (preparedIndexes_.erase(shardIndex))
422  updatePeers(lock);
423 }
424 
427 {
429  {
430  std::lock_guard lock(mutex_);
431  assert(init_);
432 
433  for (auto const& shardIndex : preparedIndexes_)
434  rs.insert(shardIndex);
435  }
436 
437  if (rs.empty())
438  return {};
439 
440  return ripple::to_string(rs);
441 };
442 
443 bool
445  std::uint32_t shardIndex,
446  boost::filesystem::path const& srcDir)
447 {
448  auto fail = [&](std::string const& msg,
449  std::lock_guard<std::mutex> const& lock) {
450  JLOG(j_.error()) << "shard " << shardIndex << " " << msg;
451 
452  // Remove the failed import shard index so it can be retried
453  preparedIndexes_.erase(shardIndex);
454  updatePeers(lock);
455  return false;
456  };
457 
458  using namespace boost::filesystem;
459  try
460  {
461  if (!is_directory(srcDir) || is_empty(srcDir))
462  {
463  return fail(
464  "invalid source directory " + srcDir.string(),
466  }
467  }
468  catch (std::exception const& e)
469  {
470  return fail(
471  std::string(". Exception caught in function ") + __func__ +
472  ". Error: " + e.what(),
474  }
475 
476  auto const expectedHash{app_.getLedgerMaster().walkHashBySeq(
478  if (!expectedHash)
479  return fail("expected hash not found", std::lock_guard(mutex_));
480 
481  path dstDir;
482  {
483  std::lock_guard lock(mutex_);
484  if (shards_.find(shardIndex) != shards_.end())
485  return fail("already exists", lock);
486 
487  // Check shard was prepared for import
488  if (preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
489  return fail("was not prepared for import", lock);
490 
491  auto const pathDesignation{
492  prepareForNewShard(shardIndex, numHistoricalShards(lock), lock)};
493  if (!pathDesignation)
494  return fail("failed to import", lock);
495 
496  if (*pathDesignation == PathDesignation::historical)
497  dstDir = chooseHistoricalPath(lock);
498  else
499  dstDir = dir_;
500  }
501  dstDir /= std::to_string(shardIndex);
502 
503  auto renameDir = [&, fname = __func__](path const& src, path const& dst) {
504  try
505  {
506  rename(src, dst);
507  }
508  catch (std::exception const& e)
509  {
510  return fail(
511  std::string(". Exception caught in function ") + fname +
512  ". Error: " + e.what(),
514  }
515  return true;
516  };
517 
518  // Rename source directory to the shard database directory
519  if (!renameDir(srcDir, dstDir))
520  return false;
521 
522  // Create the new shard
523  auto shard{std::make_unique<Shard>(
524  app_, *this, shardIndex, dstDir.parent_path(), j_)};
525 
526  if (!shard->init(scheduler_, *ctx_) ||
527  shard->getState() != ShardState::complete)
528  {
529  shard.reset();
530  renameDir(dstDir, srcDir);
531  return fail("failed to import", std::lock_guard(mutex_));
532  }
533 
534  auto const [it, inserted] = [&]() {
535  std::lock_guard lock(mutex_);
536  preparedIndexes_.erase(shardIndex);
537  return shards_.emplace(shardIndex, std::move(shard));
538  }();
539 
540  if (!inserted)
541  {
542  shard.reset();
543  renameDir(dstDir, srcDir);
544  return fail("failed to import", std::lock_guard(mutex_));
545  }
546 
547  finalizeShard(it->second, true, expectedHash);
548  return true;
549 }
550 
553 {
554  auto const shardIndex{seqToShardIndex(ledgerSeq)};
555  {
557  {
558  std::lock_guard lock(mutex_);
559  assert(init_);
560 
561  auto const it{shards_.find(shardIndex)};
562  if (it == shards_.end())
563  return nullptr;
564  shard = it->second;
565  }
566 
567  // Ledger must be stored in a final or acquiring shard
568  switch (shard->getState())
569  {
571  break;
572  case ShardState::acquire:
573  if (shard->containsLedger(ledgerSeq))
574  break;
575  [[fallthrough]];
576  default:
577  return nullptr;
578  }
579  }
580 
581  auto const nodeObject{Database::fetchNodeObject(hash, ledgerSeq)};
582  if (!nodeObject)
583  return nullptr;
584 
585  auto fail = [&](std::string const& msg) -> std::shared_ptr<Ledger> {
586  JLOG(j_.error()) << "shard " << shardIndex << " " << msg;
587  return nullptr;
588  };
589 
590  auto ledger{std::make_shared<Ledger>(
591  deserializePrefixedHeader(makeSlice(nodeObject->getData())),
592  app_.config(),
593  *app_.getShardFamily())};
594 
595  if (ledger->info().seq != ledgerSeq)
596  {
597  return fail(
598  "encountered invalid ledger sequence " + std::to_string(ledgerSeq));
599  }
600  if (ledger->info().hash != hash)
601  {
602  return fail(
603  "encountered invalid ledger hash " + to_string(hash) +
604  " on sequence " + std::to_string(ledgerSeq));
605  }
606 
607  ledger->setFull();
608  if (!ledger->stateMap().fetchRoot(
609  SHAMapHash{ledger->info().accountHash}, nullptr))
610  {
611  return fail(
612  "is missing root STATE node on hash " + to_string(hash) +
613  " on sequence " + std::to_string(ledgerSeq));
614  }
615 
616  if (ledger->info().txHash.isNonZero())
617  {
618  if (!ledger->txMap().fetchRoot(
619  SHAMapHash{ledger->info().txHash}, nullptr))
620  {
621  return fail(
622  "is missing root TXN node on hash " + to_string(hash) +
623  " on sequence " + std::to_string(ledgerSeq));
624  }
625  }
626  return ledger;
627 }
628 
629 void
631 {
632  auto const ledgerSeq{ledger->info().seq};
633  if (ledger->info().hash.isZero())
634  {
635  JLOG(j_.error()) << "zero ledger hash for ledger sequence "
636  << ledgerSeq;
637  return;
638  }
639  if (ledger->info().accountHash.isZero())
640  {
641  JLOG(j_.error()) << "zero account hash for ledger sequence "
642  << ledgerSeq;
643  return;
644  }
645  if (ledger->stateMap().getHash().isNonZero() &&
646  !ledger->stateMap().isValid())
647  {
648  JLOG(j_.error()) << "invalid state map for ledger sequence "
649  << ledgerSeq;
650  return;
651  }
652  if (ledger->info().txHash.isNonZero() && !ledger->txMap().isValid())
653  {
654  JLOG(j_.error()) << "invalid transaction map for ledger sequence "
655  << ledgerSeq;
656  return;
657  }
658 
659  auto const shardIndex{seqToShardIndex(ledgerSeq)};
661  {
662  std::lock_guard lock(mutex_);
663  assert(init_);
664 
665  if (shardIndex != acquireIndex_)
666  {
667  JLOG(j_.trace())
668  << "shard " << shardIndex << " is not being acquired";
669  return;
670  }
671 
672  auto const it{shards_.find(shardIndex)};
673  if (it == shards_.end())
674  {
675  JLOG(j_.error())
676  << "shard " << shardIndex << " is not being acquired";
677  return;
678  }
679  shard = it->second;
680  }
681 
682  if (shard->containsLedger(ledgerSeq))
683  {
684  JLOG(j_.trace()) << "shard " << shardIndex << " ledger already stored";
685  return;
686  }
687 
688  setStoredInShard(shard, ledger);
689 }
690 
693 {
694  std::lock_guard lock(mutex_);
695  return getShardInfo(lock);
696 }
697 
698 void
700 {
701  // Stop read threads in base before data members are destroyed
702  Database::stop();
704  {
705  std::lock_guard lock(mutex_);
706  shards.reserve(shards_.size());
707  for (auto const& [_, shard] : shards_)
708  {
709  shards.push_back(shard);
710  shard->stop();
711  }
712  shards_.clear();
713  }
714  taskQueue_.stop();
715 
716  // All shards should be expired at this point
717  for (auto const& wptr : shards)
718  {
719  if (auto const shard{wptr.lock()})
720  {
721  JLOG(j_.warn()) << " shard " << shard->index() << " unexpired";
722  }
723  }
724 
725  // Notify the shard being imported
726  // from the node store to stop
728  {
729  // A node store import is in progress
730  if (auto importShard = databaseImportStatus_->currentShard.lock();
731  importShard)
732  importShard->stop();
733  }
734 
735  // Wait for the node store import thread
736  // if necessary
739 }
740 
741 void
743 {
744  std::lock_guard lock(mutex_);
745  assert(init_);
746 
747  // Only the application local node store can be imported
748  assert(&source == &app_.getNodeStore());
749 
751  {
752  assert(false);
753  JLOG(j_.error()) << "database import already in progress";
754  return;
755  }
756 
757  // Run the lengthy node store import process in the background
758  // on a dedicated thread.
760 }
761 
762 void
764 {
765  if (isStopping())
766  return;
767 
768  auto loadLedger =
769  [this](char const* const sortOrder) -> std::optional<std::uint32_t> {
771  std::uint32_t ledgerSeq{0};
773  if (sortOrder == std::string("asc"))
774  {
775  info = dynamic_cast<RelationalDBInterfaceSqlite*>(
778  }
779  else
780  {
781  info = dynamic_cast<RelationalDBInterfaceSqlite*>(
784  }
785  if (info)
786  {
787  ledger = loadLedgerHelper(*info, app_, false);
788  ledgerSeq = info->seq;
789  }
790  if (!ledger || ledgerSeq == 0)
791  {
792  JLOG(j_.error()) << "no suitable ledgers were found in"
793  " the SQLite database to import";
794  return std::nullopt;
795  }
796  return ledgerSeq;
797  };
798 
799  // Find earliest ledger sequence stored
800  auto const earliestLedgerSeq{loadLedger("asc")};
801  if (!earliestLedgerSeq)
802  return;
803 
804  auto const earliestIndex = [&] {
805  auto earliestIndex = seqToShardIndex(*earliestLedgerSeq);
806 
807  // Consider only complete shards
808  if (earliestLedgerSeq != firstLedgerSeq(earliestIndex))
809  ++earliestIndex;
810 
811  return earliestIndex;
812  }();
813 
814  // Find last ledger sequence stored
815  auto const latestLedgerSeq = loadLedger("desc");
816  if (!latestLedgerSeq)
817  return;
818 
819  auto const latestIndex = [&] {
820  auto latestIndex = seqToShardIndex(*latestLedgerSeq);
821 
822  // Consider only complete shards
823  if (latestLedgerSeq != lastLedgerSeq(latestIndex))
824  --latestIndex;
825 
826  return latestIndex;
827  }();
828 
829  if (latestIndex < earliestIndex)
830  {
831  JLOG(j_.error()) << "no suitable ledgers were found in"
832  " the SQLite database to import";
833  return;
834  }
835 
836  JLOG(j_.debug()) << "Importing ledgers for shards " << earliestIndex
837  << " through " << latestIndex;
838 
839  {
840  std::lock_guard lock(mutex_);
841 
842  assert(!databaseImportStatus_);
843  databaseImportStatus_ = std::make_unique<DatabaseImportStatus>(
844  earliestIndex, latestIndex, 0);
845  }
846 
847  // Import the shards
848  for (std::uint32_t shardIndex = earliestIndex; shardIndex <= latestIndex;
849  ++shardIndex)
850  {
851  if (isStopping())
852  return;
853 
854  auto const pathDesignation = [this, shardIndex] {
855  std::lock_guard lock(mutex_);
856 
857  auto const numHistShards = numHistoricalShards(lock);
858  auto const pathDesignation =
859  prepareForNewShard(shardIndex, numHistShards, lock);
860 
861  return pathDesignation;
862  }();
863 
864  if (!pathDesignation)
865  break;
866 
867  {
868  std::lock_guard lock(mutex_);
869 
870  // Skip if being acquired
871  if (shardIndex == acquireIndex_)
872  {
873  JLOG(j_.debug())
874  << "shard " << shardIndex << " already being acquired";
875  continue;
876  }
877 
878  // Skip if being imported from the shard archive handler
879  if (preparedIndexes_.find(shardIndex) != preparedIndexes_.end())
880  {
881  JLOG(j_.debug())
882  << "shard " << shardIndex << " already being imported";
883  continue;
884  }
885 
886  // Skip if stored
887  if (shards_.find(shardIndex) != shards_.end())
888  {
889  JLOG(j_.debug()) << "shard " << shardIndex << " already stored";
890  continue;
891  }
892  }
893 
894  std::uint32_t const firstSeq = firstLedgerSeq(shardIndex);
895  std::uint32_t const lastSeq =
896  std::max(firstSeq, lastLedgerSeq(shardIndex));
897 
898  // Verify SQLite ledgers are in the node store
899  {
900  auto const ledgerHashes{
902  firstSeq, lastSeq)};
903  if (ledgerHashes.size() != maxLedgers(shardIndex))
904  continue;
905 
906  auto& source = app_.getNodeStore();
907  bool valid{true};
908 
909  for (std::uint32_t n = firstSeq; n <= lastSeq; ++n)
910  {
911  if (!source.fetchNodeObject(ledgerHashes.at(n).ledgerHash, n))
912  {
913  JLOG(j_.warn()) << "SQLite ledger sequence " << n
914  << " mismatches node store";
915  valid = false;
916  break;
917  }
918  }
919  if (!valid)
920  continue;
921  }
922 
923  if (isStopping())
924  return;
925 
926  bool const needsHistoricalPath =
927  *pathDesignation == PathDesignation::historical;
928 
929  auto const path = needsHistoricalPath
931  : dir_;
932 
933  // Create the new shard
934  auto shard{std::make_shared<Shard>(app_, *this, shardIndex, path, j_)};
935  if (!shard->init(scheduler_, *ctx_))
936  continue;
937 
938  {
939  std::lock_guard lock(mutex_);
940 
941  if (isStopping())
942  return;
943 
944  databaseImportStatus_->currentIndex = shardIndex;
945  databaseImportStatus_->currentShard = shard;
946  databaseImportStatus_->firstSeq = firstSeq;
947  databaseImportStatus_->lastSeq = lastSeq;
948  }
949 
950  // Create a marker file to signify a database import in progress
951  auto const shardDir{path / std::to_string(shardIndex)};
952  auto const markerFile{shardDir / databaseImportMarker_};
953  {
954  std::ofstream ofs{markerFile.string()};
955  if (!ofs.is_open())
956  {
957  JLOG(j_.error()) << "shard " << shardIndex
958  << " failed to create temp marker file";
959  shard->removeOnDestroy();
960  continue;
961  }
962  }
963 
964  // Copy the ledgers from node store
965  std::shared_ptr<Ledger> recentStored;
966  std::optional<uint256> lastLedgerHash;
967 
968  while (auto const ledgerSeq = shard->prepare())
969  {
970  if (isStopping())
971  return;
972 
973  // Not const so it may be moved later
974  auto ledger{loadByIndex(*ledgerSeq, app_, false)};
975  if (!ledger || ledger->info().seq != ledgerSeq)
976  break;
977 
978  auto const result{shard->storeLedger(ledger, recentStored)};
979  storeStats(result.count, result.size);
980  if (result.error)
981  break;
982 
983  if (!shard->setLedgerStored(ledger))
984  break;
985 
986  if (!lastLedgerHash && ledgerSeq == lastSeq)
987  lastLedgerHash = ledger->info().hash;
988 
989  recentStored = std::move(ledger);
990  }
991 
992  if (isStopping())
993  return;
994 
995  using namespace boost::filesystem;
996  bool success{false};
997  if (lastLedgerHash && shard->getState() == ShardState::complete)
998  {
999  // Store shard final key
1000  Serializer s;
1001  s.add32(Shard::version);
1002  s.add32(firstLedgerSeq(shardIndex));
1003  s.add32(lastLedgerSeq(shardIndex));
1004  s.addBitString(*lastLedgerHash);
1005  auto const nodeObject{NodeObject::createObject(
1006  hotUNKNOWN, std::move(s.modData()), Shard::finalKey)};
1007 
1008  if (shard->storeNodeObject(nodeObject))
1009  {
1010  try
1011  {
1012  std::lock_guard lock(mutex_);
1013 
1014  // The database import process is complete and the
1015  // marker file is no longer required
1016  remove_all(markerFile);
1017 
1018  JLOG(j_.debug()) << "shard " << shardIndex
1019  << " was successfully imported"
1020  " from the NodeStore";
1021  finalizeShard(
1022  shards_.emplace(shardIndex, std::move(shard))
1023  .first->second,
1024  true,
1025  std::nullopt);
1026 
1027  // This variable is meant to capture the success
1028  // of everything up to the point of shard finalization.
1029  // If the shard fails to finalize, this condition will
1030  // be handled by the finalization function itself, and
1031  // not here.
1032  success = true;
1033  }
1034  catch (std::exception const& e)
1035  {
1036  JLOG(j_.fatal()) << "shard index " << shardIndex
1037  << ". Exception caught in function "
1038  << __func__ << ". Error: " << e.what();
1039  }
1040  }
1041  }
1042 
1043  if (!success)
1044  {
1045  JLOG(j_.error()) << "shard " << shardIndex
1046  << " failed to import from the NodeStore";
1047  shard->removeOnDestroy();
1048  }
1049  }
1050 
1051  {
1052  std::lock_guard lock(mutex_);
1053  if (isStopping())
1054  return;
1055 
1056  databaseImportStatus_.reset();
1057  }
1058 
1059  updateFileStats();
1060 }
1061 
1064 {
1065  std::shared_ptr<Shard> shard;
1066  {
1067  std::lock_guard lock(mutex_);
1068  assert(init_);
1069 
1070  auto const it{shards_.find(acquireIndex_)};
1071  if (it == shards_.end())
1072  return 0;
1073  shard = it->second;
1074  }
1075 
1076  return shard->getWriteLoad();
1077 }
1078 
1079 void
1081  NodeObjectType type,
1082  Blob&& data,
1083  uint256 const& hash,
1084  std::uint32_t ledgerSeq)
1085 {
1086  auto const shardIndex{seqToShardIndex(ledgerSeq)};
1087  std::shared_ptr<Shard> shard;
1088  {
1089  std::lock_guard lock(mutex_);
1090  if (shardIndex != acquireIndex_)
1091  {
1092  JLOG(j_.trace())
1093  << "shard " << shardIndex << " is not being acquired";
1094  return;
1095  }
1096 
1097  auto const it{shards_.find(shardIndex)};
1098  if (it == shards_.end())
1099  {
1100  JLOG(j_.error())
1101  << "shard " << shardIndex << " is not being acquired";
1102  return;
1103  }
1104  shard = it->second;
1105  }
1106 
1107  auto const nodeObject{
1108  NodeObject::createObject(type, std::move(data), hash)};
1109  if (shard->storeNodeObject(nodeObject))
1110  storeStats(1, nodeObject->getData().size());
1111 }
1112 
1113 bool
1115 {
1116  auto const ledgerSeq{srcLedger->info().seq};
1117  auto const shardIndex{seqToShardIndex(ledgerSeq)};
1118  std::shared_ptr<Shard> shard;
1119  {
1120  std::lock_guard lock(mutex_);
1121  assert(init_);
1122 
1123  if (shardIndex != acquireIndex_)
1124  {
1125  JLOG(j_.trace())
1126  << "shard " << shardIndex << " is not being acquired";
1127  return false;
1128  }
1129 
1130  auto const it{shards_.find(shardIndex)};
1131  if (it == shards_.end())
1132  {
1133  JLOG(j_.error())
1134  << "shard " << shardIndex << " is not being acquired";
1135  return false;
1136  }
1137  shard = it->second;
1138  }
1139 
1140  auto const result{shard->storeLedger(srcLedger, nullptr)};
1141  storeStats(result.count, result.size);
1142  if (result.error || result.count == 0 || result.size == 0)
1143  return false;
1144 
1145  return setStoredInShard(shard, srcLedger);
1146 }
1147 
1148 void
1150 {
1152  {
1153  std::lock_guard lock(mutex_);
1154  assert(init_);
1155 
1156  shards.reserve(shards_.size());
1157  for (auto const& e : shards_)
1158  shards.push_back(e.second);
1159  }
1160 
1162  openFinals.reserve(openFinalLimit_);
1163 
1164  for (auto const& weak : shards)
1165  {
1166  if (auto const shard{weak.lock()}; shard && shard->isOpen())
1167  {
1168  if (shard->getState() == ShardState::finalized)
1169  openFinals.emplace_back(std::move(shard));
1170  }
1171  }
1172 
1173  if (openFinals.size() > openFinalLimit_)
1174  {
1175  JLOG(j_.trace()) << "Open shards exceed configured limit of "
1176  << openFinalLimit_ << " by "
1177  << (openFinals.size() - openFinalLimit_);
1178 
1179  // Try to close enough shards to be within the limit.
1180  // Sort ascending on last use so the oldest are removed first.
1181  std::sort(
1182  openFinals.begin(),
1183  openFinals.end(),
1184  [&](std::shared_ptr<Shard> const& lhsShard,
1185  std::shared_ptr<Shard> const& rhsShard) {
1186  return lhsShard->getLastUse() < rhsShard->getLastUse();
1187  });
1188 
1189  for (auto it{openFinals.cbegin()};
1190  it != openFinals.cend() && openFinals.size() > openFinalLimit_;)
1191  {
1192  if ((*it)->tryClose())
1193  it = openFinals.erase(it);
1194  else
1195  ++it;
1196  }
1197  }
1198 }
1199 
1202 {
1204 
1206  {
1207  ret[jss::firstShardIndex] = databaseImportStatus_->earliestIndex;
1208  ret[jss::lastShardIndex] = databaseImportStatus_->latestIndex;
1209  ret[jss::currentShardIndex] = databaseImportStatus_->currentIndex;
1210 
1211  Json::Value currentShard(Json::objectValue);
1212  currentShard[jss::firstSequence] = databaseImportStatus_->firstSeq;
1213  currentShard[jss::lastSequence] = databaseImportStatus_->lastSeq;
1214 
1215  if (auto shard = databaseImportStatus_->currentShard.lock(); shard)
1216  currentShard[jss::storedSeqs] = shard->getStoredSeqs();
1217 
1218  ret[jss::currentShard] = currentShard;
1219  }
1220  else
1221  ret = "Database import not running";
1222 
1223  return ret;
1224 }
1225 
1228 {
1229  std::lock_guard lock(mutex_);
1230 
1231  if (!databaseImportStatus_)
1232  return {};
1233 
1234  return databaseImportStatus_->firstSeq;
1235 }
1236 
bool
// Validate the [shard_db] configuration and initialize the related members.
// Returns false (after logging) on any configuration error.
// NOTE(review): the signature line (presumably DatabaseShardImp::initConfig)
// is not visible in this chunk.
{
    // Emit a section-tagged error message and signal failure to the caller.
    auto fail = [j = j_](std::string const& msg) {
        JLOG(j.error()) << "[" << ConfigSection::shardDatabase() << "] " << msg;
        return false;
    };

    Config const& config{app_.config()};
    Section const& section{config.section(ConfigSection::shardDatabase())};

    // True when 'name' resolves to the same value (explicit or defaulted)
    // in both the shard database and node database sections.
    auto compare = [&](std::string const& name, std::uint32_t defaultValue) {
        std::uint32_t shardDBValue{defaultValue};
        get_if_exists<std::uint32_t>(section, name, shardDBValue);

        std::uint32_t nodeDBValue{defaultValue};
        get_if_exists<std::uint32_t>(
            config.section(ConfigSection::nodeDatabase()), name, nodeDBValue);

        return shardDBValue == nodeDBValue;
    };

    // If ledgers_per_shard or earliest_seq are specified,
    // they must be equally assigned in 'node_db'
    if (!compare("ledgers_per_shard", DEFAULT_LEDGERS_PER_SHARD))
    {
        return fail(
            "and [" + ConfigSection::nodeDatabase() + "] define different '" +
            "ledgers_per_shard" + "' values");
    }
    if (!compare("earliest_seq", XRP_LEDGER_EARLIEST_SEQ))
    {
        return fail(
            "and [" + ConfigSection::nodeDatabase() + "] define different '" +
            "earliest_seq" + "' values");
    }

    // The main shard directory is mandatory.
    using namespace boost::filesystem;
    if (!get_if_exists<path>(section, "path", dir_))
        return fail("'path' missing");

    {
        get_if_exists(section, "max_historical_shards", maxHistoricalShards_);

        Section const& historicalShardPaths =
            config.section(SECTION_HISTORICAL_SHARD_PATHS);

        auto values = historicalShardPaths.values();

        // De-duplicate the configured historical paths.
        std::sort(values.begin(), values.end());
        values.erase(std::unique(values.begin(), values.end()), values.end());

        for (auto const& s : values)
        {
            auto const dir = path(s);
            if (dir_ == dir)
            {
                return fail(
                    "the 'path' cannot also be in the "
                    "'historical_shard_path' section");
            }

            // NOTE(review): the statement recording this path (presumably
            // historicalPaths_.push_back(dir)) is not visible in this
            // chunk — confirm against the full file.
        }
    }

    // NuDB is the default and only supported permanent storage backend
    backendName_ = get(section, "type", "nudb");
    if (!boost::iequals(backendName_, "NuDB"))
        return fail("'type' value unsupported");

    return true;
}
1310 
1313  uint256 const& hash,
1314  std::uint32_t ledgerSeq,
1315  FetchReport& fetchReport)
1316 {
1317  auto const shardIndex{seqToShardIndex(ledgerSeq)};
1318  std::shared_ptr<Shard> shard;
1319  {
1320  std::lock_guard lock(mutex_);
1321  auto const it{shards_.find(shardIndex)};
1322  if (it == shards_.end())
1323  return nullptr;
1324  shard = it->second;
1325  }
1326 
1327  return shard->fetchNodeObject(hash, fetchReport);
1328 }
1329 
    std::uint32_t validLedgerSeq,
// NOTE(review): the function name (findAcquireIndex) and the trailing
// std::lock_guard parameter are not visible in this chunk.
{
    // Choose a shard index to acquire next, or std::nullopt when no shard
    // can be acquired (nothing validated yet, or all shards present).
    if (validLedgerSeq < earliestLedgerSeq_)
        return std::nullopt;

    // Highest shard index fully covered by validated ledgers; step back one
    // if the latest validated ledger does not complete its shard.
    auto const maxShardIndex{[this, validLedgerSeq]() {
        auto shardIndex{seqToShardIndex(validLedgerSeq)};
        if (validLedgerSeq != lastLedgerSeq(shardIndex))
            --shardIndex;
        return shardIndex;
    }()};
    auto const maxNumShards{maxShardIndex - earliestShardIndex_ + 1};

    // Check if the shard store has all shards
    if (shards_.size() >= maxNumShards)
        return std::nullopt;

    if (maxShardIndex < 1024 ||
        static_cast<float>(shards_.size()) / maxNumShards > 0.5f)
    {
        // Small or mostly full index space to sample
        // Find the available indexes and select one at random
        // NOTE(review): the declaration of 'available' (presumably
        // std::vector<std::uint32_t>) is not visible in this chunk.
        available.reserve(maxNumShards - shards_.size());

        for (auto shardIndex = earliestShardIndex_; shardIndex <= maxShardIndex;
             ++shardIndex)
        {
            // An index is available if it is neither stored nor queued.
            if (shards_.find(shardIndex) == shards_.end() &&
                preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
            {
                available.push_back(shardIndex);
            }
        }

        if (available.empty())
            return std::nullopt;

        if (available.size() == 1)
            return available.front();

        return available[rand_int(
            0u, static_cast<std::uint32_t>(available.size() - 1))];
    }

    // Large, sparse index space to sample
    // Keep choosing indexes at random until an available one is found;
    // with at most 50% occupancy (guaranteed by the branch above), the
    // chance that all 40 probes collide is under 1 in a billion.
    for (int i = 0; i < 40; ++i)
    {
        auto const shardIndex{rand_int(earliestShardIndex_, maxShardIndex)};
        if (shards_.find(shardIndex) == shards_.end() &&
            preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
        {
            return shardIndex;
        }
    }

    // Statistically (near-)unreachable; give up rather than loop forever.
    assert(false);
    return std::nullopt;
}
1394 
void
// Queue an asynchronous task that finalizes the given shard: verify its
// contents (optionally against 'expectedHash'), optionally rewrite its
// SQLite data, classify it as recent or historical, and notify peers.
// NOTE(review): the signature line (DatabaseShardImp::finalizeShard) is not
// visible in this chunk.
    std::shared_ptr<Shard>& shard,
    bool const writeSQLite,
    std::optional<uint256> const& expectedHash)
{
    // Hold the shard weakly so a removed shard does not outlive its removal
    // just because a finalize task is pending.
    taskQueue_.addTask([this,
                        wptr = std::weak_ptr<Shard>(shard),
                        writeSQLite,
                        expectedHash]() {
        if (isStopping())
            return;

        auto shard{wptr.lock()};
        if (!shard)
        {
            JLOG(j_.debug()) << "Shard removed before being finalized";
            return;
        }

        if (!shard->finalize(writeSQLite, expectedHash))
        {
            if (isStopping())
                return;

            // Invalid or corrupt shard, remove it
            removeFailedShard(shard);
            return;
        }

        if (isStopping())
            return;

        {
            // Compute the boundary before taking the mutex; it consults the
            // ledger master rather than this object's state.
            auto const boundaryIndex{shardBoundaryIndex()};
            std::lock_guard lock(mutex_);

            if (shard->index() < boundaryIndex)
            {
                // This is a historical shard
                if (!historicalPaths_.empty() &&
                    shard->getDir().parent_path() == dir_)
                {
                    // Shard wasn't placed at a separate historical path
                    JLOG(j_.warn()) << "shard " << shard->index()
                                    << " is not stored at a historical path";
                }
            }
            else
            {
                // Not a historical shard. Shift recent shards if necessary
                assert(!boundaryIndex || shard->index() - boundaryIndex <= 1);
                relocateOutdatedShards(lock);

                // Set the appropriate recent shard index
                if (shard->index() == boundaryIndex)
                    secondLatestShardIndex_ = shard->index();
                else
                    latestShardIndex_ = shard->index();

                // Recent shards belong under the main shard path.
                if (shard->getDir().parent_path() != dir_)
                {
                    JLOG(j_.warn()) << "shard " << shard->index()
                                    << " is not stored at the path";
                }
            }

            updatePeers(lock);
        }

        updateFileStats();
    });
}
1468 
void
// Recompute aggregate file statistics (total size, required file
// descriptors, average shard size) and re-evaluate whether more shards may
// be added given the historical-shard limit and available disk space.
// NOTE(review): the signature line and the declaration of the local
// 'shards' container (presumably std::vector<std::weak_ptr<Shard>>) are not
// visible in this chunk.
{
    {
        // Snapshot the shard set under the lock; the per-shard queries
        // below happen without holding the mutex.
        std::lock_guard lock(mutex_);
        if (shards_.empty())
            return;

        shards.reserve(shards_.size());
        for (auto const& e : shards_)
            shards.push_back(e.second);
    }

    std::uint64_t sumSz{0};
    std::uint32_t sumFd{0};
    std::uint32_t numShards{0};
    for (auto const& weak : shards)
    {
        // Skip shards removed while we were iterating.
        if (auto const shard{weak.lock()}; shard)
        {
            auto const [sz, fd] = shard->getFileInfo();
            sumSz += sz;
            sumFd += fd;
            ++numShards;
        }
    }

    std::lock_guard lock(mutex_);
    fileSz_ = sumSz;
    fdRequired_ = sumFd;
    avgShardFileSz_ = (numShards == 0 ? fileSz_ : fileSz_ / numShards);

    if (!canAdd_)
        return;

    if (auto const count = numHistoricalShards(lock);
        count >= maxHistoricalShards_)
    {
        // NOTE(review): a guard line (presumably 'if (maxHistoricalShards_)')
        // is not visible in this chunk — confirm against the full file.
        {
            // In order to avoid excessive output, don't produce
            // this warning if the server isn't configured to
            // store historical shards.
            JLOG(j_.warn()) << "maximum number of historical shards reached";
        }

        canAdd_ = false;
    }
    else if (!sufficientStorage(
                 maxHistoricalShards_ - count,
                 // NOTE(review): the PathDesignation argument (presumably
                 // PathDesignation::historical) is not visible in this chunk.
                 lock))
    {
        JLOG(j_.warn())
            << "maximum shard store size exceeds available storage space";

        canAdd_ = false;
    }
}
1529 
bool
// Return true when the configured storage locations collectively have room
// for 'numShards' more shards of (estimated) average size. Returns false on
// filesystem errors as well as on insufficient space.
// NOTE(review): the signature line (DatabaseShardImp::sufficientStorage) is
// not visible in this chunk.
    std::uint32_t numShards,
    PathDesignation pathDesignation,
    std::lock_guard<std::mutex> const&) const
{
    try
    {
        // Available byte counts of each storage location to consider.
        std::vector<std::uint64_t> capacities;

        if (pathDesignation == PathDesignation::historical &&
            // NOTE(review): the condition continuation (presumably
            // !historicalPaths_.empty()) is not visible in this chunk.
        {
            capacities.reserve(historicalPaths_.size());

            for (auto const& path : historicalPaths_)
            {
                // Get the available storage for each historical path
                auto const availableSpace =
                    boost::filesystem::space(path).available;

                capacities.push_back(availableSpace);
            }
        }
        else
        {
            // Get the available storage for the main shard path
            capacities.push_back(boost::filesystem::space(dir_).available);
        }

        for (std::uint64_t const capacity : capacities)
        {
            // Leverage all the historical shard paths to
            // see if collectively they can fit the specified
            // number of shards. For this to work properly,
            // each historical path must correspond to a separate
            // physical device or filesystem.

            auto const shardCap = capacity / avgShardFileSz_;
            if (numShards <= shardCap)
                return true;

            // This path covers part of the requirement; spread the rest
            // over the remaining paths.
            numShards -= shardCap;
        }
    }
    catch (std::exception const& e)
    {
        JLOG(j_.fatal()) << "Exception caught in function " << __func__
                         << ". Error: " << e.what();
        return false;
    }

    return false;
}
1584 
bool
// Record that 'ledger' has been stored in 'shard'; if that completes the
// shard, kick off its finalization. Returns false when the shard rejects
// the ledger (the shard is then removed as failed).
// NOTE(review): the signature line (DatabaseShardImp::setStoredInShard) is
// not visible in this chunk.
    std::shared_ptr<Shard>& shard,
    std::shared_ptr<Ledger const> const& ledger)
{
    if (!shard->setLedgerStored(ledger))
    {
        // Invalid or corrupt shard, remove it
        removeFailedShard(shard);
        return false;
    }

    if (shard->getState() == ShardState::complete)
    {
        std::lock_guard lock(mutex_);
        if (auto const it{shards_.find(shard->index())}; it != shards_.end())
        {
            // This shard is no longer the one being actively acquired.
            if (shard->index() == acquireIndex_)
                acquireIndex_ = 0;

            finalizeShard(it->second, false, std::nullopt);
        }
        else
        {
            JLOG(j_.debug())
                << "shard " << shard->index() << " is no longer being acquired";
        }
    }

    updateFileStats();
    return true;
}
1617 
1618 void
1620 {
1621  {
1622  std::lock_guard lock(mutex_);
1623 
1624  if (shard->index() == acquireIndex_)
1625  acquireIndex_ = 0;
1626 
1627  if (shard->index() == latestShardIndex_)
1628  latestShardIndex_ = std::nullopt;
1629 
1630  if (shard->index() == secondLatestShardIndex_)
1631  secondLatestShardIndex_ = std::nullopt;
1632  }
1633 
1634  shard->removeOnDestroy();
1635 
1636  // Reset the shared_ptr to invoke the shard's
1637  // destructor and remove it from the server
1638  shard.reset();
1639  updateFileStats();
1640 }
1641 
1644 {
1645  auto const validIndex = app_.getLedgerMaster().getValidLedgerIndex();
1646 
1647  if (validIndex < earliestLedgerSeq_)
1648  return 0;
1649 
1650  // Shards with an index earlier than the recent shard boundary index
1651  // are considered historical. The three shards at or later than
1652  // this index consist of the two most recently validated shards
1653  // and the shard still in the process of being built by live
1654  // transactions.
1655  return seqToShardIndex(validIndex) - 1;
1656 }
1657 
1660  std::lock_guard<std::mutex> const& lock) const
1661 {
1662  auto const boundaryIndex{shardBoundaryIndex()};
1663  return std::count_if(
1664  shards_.begin(), shards_.end(), [boundaryIndex](auto const& entry) {
1665  return entry.first < boundaryIndex;
1666  });
1667 }
1668 
void
// After a new shard is published, demote the tracked "recent" shards:
// shards that are no longer one of the two most recent are either moved to
// a historical path or removed if limits/space forbid keeping them.
// NOTE(review): the signature line (DatabaseShardImp::relocateOutdatedShards)
// is not visible in this chunk.
    std::lock_guard<std::mutex> const& lock)
{
    // Aliases for the two tracked recent shard indexes.
    auto& cur{latestShardIndex_};
    auto& prev{secondLatestShardIndex_};
    if (!cur && !prev)
        return;

    auto const latestShardIndex =
        // NOTE(review): the initializer is not visible in this chunk
        // (presumably derived from the latest validated ledger's shard
        // index) — confirm against the full file.
    auto const separateHistoricalPath = !historicalPaths_.empty();

    // Remove a shard that cannot be kept; also stop accepting new shards.
    auto const removeShard = [this](std::uint32_t const shardIndex) -> void {
        canAdd_ = false;

        if (auto it = shards_.find(shardIndex); it != shards_.end())
        {
            if (it->second)
                removeFailedShard(it->second);
            else
            {
                JLOG(j_.warn()) << "can't find shard to remove";
            }
        }
        else
        {
            JLOG(j_.warn()) << "can't find shard to remove";
        }
    };

    // Decide whether a demoted shard may be kept as a historical shard;
    // removes it (and returns false) when limits or storage forbid it.
    auto const keepShard = [this, &lock, removeShard, separateHistoricalPath](
                               std::uint32_t const shardIndex) -> bool {
        // NOTE(review): the guard condition is not visible in this chunk
        // (presumably comparing numHistoricalShards(lock) against
        // maxHistoricalShards_) — confirm against the full file.
        {
            JLOG(j_.error()) << "maximum number of historical shards reached";
            removeShard(shardIndex);
            return false;
        }
        if (separateHistoricalPath &&
            // NOTE(review): the condition continuation is not visible in
            // this chunk (presumably a !sufficientStorage(...) check).
        {
            JLOG(j_.error()) << "insufficient storage space available";
            removeShard(shardIndex);
            return false;
        }

        return true;
    };

    // Move a shard from the main shard path to a historical shard
    // path by copying the contents, and creating a new shard.
    auto const moveShard = [this,
                            &lock](std::uint32_t const shardIndex) -> void {
        auto it{shards_.find(shardIndex)};
        if (it == shards_.end())
        {
            JLOG(j_.warn()) << "can't find shard to move to historical path";
            return;
        }

        auto& shard{it->second};

        // Close any open file descriptors before moving the shard
        // directory. Don't call removeOnDestroy since that would
        // attempt to close the fds after the directory has been moved.
        if (!shard->tryClose())
        {
            JLOG(j_.warn()) << "can't close shard to move to historical path";
            return;
        }

        auto const dst{chooseHistoricalPath(lock)};
        try
        {
            // Move the shard directory to the new path
            boost::filesystem::rename(
                shard->getDir().string(), dst / std::to_string(shardIndex));
        }
        catch (...)
        {
            JLOG(j_.error()) << "shard " << shardIndex
                             << " failed to move to historical storage";
            return;
        }

        // Create a shard instance at the new location
        shard = std::make_shared<Shard>(app_, *this, shardIndex, dst, j_);

        // Open the new shard
        if (!shard->init(scheduler_, *ctx_))
        {
            JLOG(j_.error()) << "shard " << shardIndex
                             << " failed to open in historical storage";
            shard->removeOnDestroy();
            shard.reset();
        }
    };

    // See if either of the recent shards needs to be updated
    bool const curNotSynched =
        latestShardIndex_ && *latestShardIndex_ != latestShardIndex;
    bool const prevNotSynched = secondLatestShardIndex_ &&
        *secondLatestShardIndex_ != latestShardIndex - 1;

    // A new shard has been published. Move outdated
    // shards to historical storage as needed
    if (curNotSynched || prevNotSynched)
    {
        if (prev)
        {
            // Move the formerly second latest shard to historical storage
            if (keepShard(*prev) && separateHistoricalPath)
                moveShard(*prev);

            prev = std::nullopt;
        }

        if (cur)
        {
            // The formerly latest shard is now the second latest
            if (cur == latestShardIndex - 1)
                prev = cur;

            // The formerly latest shard is no longer a 'recent' shard
            else
            {
                // Move the formerly latest shard to historical storage
                if (keepShard(*cur) && separateHistoricalPath)
                    moveShard(*cur);
            }

            cur = std::nullopt;
        }
    }
}
1805 
auto
// Decide where a new shard with the given index should be stored (main or
// historical path), or return std::nullopt — and stop accepting shards —
// when limits or available storage forbid adding it.
// NOTE(review): the function name and the remaining parameters (presumably
// a historical-shard count and a std::lock_guard) are not visible in this
// chunk.
    std::uint32_t shardIndex,
{
    // Any shard earlier than the two most recent shards is a historical shard
    auto const boundaryIndex{shardBoundaryIndex()};
    auto const isHistoricalShard = shardIndex < boundaryIndex;

    auto const designation = isHistoricalShard && !historicalPaths_.empty()
        // NOTE(review): the conditional's continuation is not visible in
        // this chunk (presumably selecting PathDesignation::historical vs.
        // the default designation) — confirm against the full file.

    // Check shard count and available storage space
    if (isHistoricalShard && numHistoricalShards >= maxHistoricalShards_)
    {
        JLOG(j_.error()) << "maximum number of historical shards reached";
        canAdd_ = false;
        return std::nullopt;
    }
    if (!sufficientStorage(1, designation, lock))
    {
        JLOG(j_.error()) << "insufficient storage space available";
        canAdd_ = false;
        return std::nullopt;
    }

    return designation;
}
1836 
1837 boost::filesystem::path
1839 {
1840  // If not configured with separate historical paths,
1841  // use the main path (dir_) by default.
1842  if (historicalPaths_.empty())
1843  return dir_;
1844 
1845  boost::filesystem::path historicalShardPath;
1846  std::vector<boost::filesystem::path> potentialPaths;
1847 
1848  for (boost::filesystem::path const& path : historicalPaths_)
1849  {
1850  if (boost::filesystem::space(path).available >= avgShardFileSz_)
1851  potentialPaths.push_back(path);
1852  }
1853 
1854  if (potentialPaths.empty())
1855  {
1856  JLOG(j_.error()) << "failed to select a historical shard path";
1857  return "";
1858  }
1859 
1860  std::sample(
1861  potentialPaths.begin(),
1862  potentialPaths.end(),
1863  &historicalShardPath,
1864  1,
1865  default_prng());
1866 
1867  return historicalShardPath;
1868 }
1869 
bool
// Verify the configured historical shard paths. On Linux, paths sharing a
// filesystem are a hard error (returns false); elsewhere, paths with equal
// free space only produce a warning (returns true).
// NOTE(review): the signature line (DatabaseShardImp::checkHistoricalPaths)
// is not visible in this chunk.
{
#if BOOST_OS_LINUX
    // Each historical shard path must correspond
    // to a directory on a distinct device or file system.
    // Currently, this constraint is enforced only on Linux.
    // NOTE(review): the declaration of 'filesystemIDs' (a map from
    // filesystem id to the paths residing on it) is not visible in this
    // chunk — confirm against the full file.

    for (auto const& path : historicalPaths_)
    {
        struct statvfs buffer;
        if (statvfs(path.c_str(), &buffer))
        {
            JLOG(j_.error())
                << "failed to acquire stats for 'historical_shard_path': "
                << path;
            return false;
        }

        // Group paths by the filesystem id reported by statvfs.
        filesystemIDs[buffer.f_fsid].push_back(path.string());
    }

    bool ret = true;
    for (auto const& entry : filesystemIDs)
    {
        // Check to see if any of the paths are stored on the same file system
        if (entry.second.size() > 1)
        {
            // Two or more historical storage paths
            // correspond to the same file system.
            JLOG(j_.error())
                << "The following paths correspond to the same filesystem: "
                << boost::algorithm::join(entry.second, ", ")
                << ". Each configured historical storage path should"
                " be on a unique device or filesystem.";

            ret = false;
        }
    }

    return ret;

#else
    // The requirement that each historical storage path
    // corresponds to a distinct device or file system is
    // enforced only on Linux, so on other platforms
    // keep track of the available capacities for each
    // path. Issue a warning if we suspect any of the paths
    // may violate this requirement.

    // Map byte counts to each path that shares that byte count.
    // NOTE(review): the declaration's type (presumably an unordered map
    // keyed by available byte count) is not visible in this chunk; only
    // the initializer remains.
        uniqueCapacities(historicalPaths_.size());

    for (auto const& path : historicalPaths_)
        uniqueCapacities[boost::filesystem::space(path).available].push_back(
            path.string());

    for (auto const& entry : uniqueCapacities)
    {
        // Check to see if any paths have the same amount of available bytes.
        if (entry.second.size() > 1)
        {
            // Two or more historical storage paths may
            // correspond to the same device or file system.
            JLOG(j_.warn())
                << "Each of the following paths have " << entry.first
                << " bytes free, and may be located on the same device"
                " or file system: "
                << boost::algorithm::join(entry.second, ", ")
                << ". Each configured historical storage path should"
                " be on a unique device or file system.";
        }
    }
#endif

    return true;
}
1950 
1951 bool
1953  LedgerIndex ledgerSeq,
1954  std::function<bool(soci::session& session)> const& callback)
1955 {
1956  return callForLedgerSQLByShardIndex(seqToShardIndex(ledgerSeq), callback);
1957 }
1958 
1959 bool
1961  const uint32_t shardIndex,
1962  std::function<bool(soci::session& session)> const& callback)
1963 {
1964  std::lock_guard lock(mutex_);
1965 
1966  auto const it{shards_.find(shardIndex)};
1967 
1968  return it != shards_.end() &&
1969  it->second->getState() == ShardState::finalized &&
1970  it->second->callForLedgerSQL(callback);
1971 }
1972 
bool
// Invoke 'callback' on the transaction SQLite database of the shard holding
// 'ledgerSeq', by delegating to the shard-index overload.
// NOTE(review): the signature line and the 'return
// callForTransactionSQLByShardIndex(' line are not visible in this chunk;
// only the argument list of the delegated call remains.
    LedgerIndex ledgerSeq,
    std::function<bool(soci::session& session)> const& callback)
{
        seqToShardIndex(ledgerSeq), callback);
}
1981 
1982 bool
1984  std::uint32_t const shardIndex,
1985  std::function<bool(soci::session& session)> const& callback)
1986 {
1987  std::lock_guard lock(mutex_);
1988 
1989  auto const it{shards_.find(shardIndex)};
1990 
1991  return it != shards_.end() &&
1992  it->second->getState() == ShardState::finalized &&
1993  it->second->callForTransactionSQL(callback);
1994 }
1995 
bool
// Visit every finalized shard with index >= minShardIndex (or all shards
// when unset) in ascending order; stops and returns false as soon as the
// visitor returns false.
// NOTE(review): the signature line and the declaration of the iterators
// 'it'/'eit' (presumably std::map<std::uint32_t,
// std::shared_ptr<Shard>>::iterator) are not visible in this chunk.
    std::optional<std::uint32_t> minShardIndex,
    std::function<bool(Shard& shard)> const& visit)
{
    std::lock_guard lock(mutex_);

    if (!minShardIndex)
        it = shards_.begin();
    else
        it = shards_.lower_bound(*minShardIndex);

    eit = shards_.end();

    for (; it != eit; it++)
    {
        // Only finalized shards are exposed to the visitor.
        if (it->second->getState() == ShardState::finalized)
        {
            if (!visit(*it->second))
                return false;
        }
    }

    return true;
}
2023 
2024 bool
2026  std::optional<std::uint32_t> minShardIndex,
2027  std::function<bool(soci::session& session, std::uint32_t shardIndex)> const&
2028  callback)
2029 {
2030  return iterateShardsForward(
2031  minShardIndex, [&callback](Shard& shard) -> bool {
2032  return shard.callForLedgerSQL(callback);
2033  });
2034 }
2035 
2036 bool
2038  std::optional<std::uint32_t> minShardIndex,
2039  std::function<bool(soci::session& session, std::uint32_t shardIndex)> const&
2040  callback)
2041 {
2042  return iterateShardsForward(
2043  minShardIndex, [&callback](Shard& shard) -> bool {
2044  return shard.callForTransactionSQL(callback);
2045  });
2046 }
2047 
bool
// Visit every finalized shard with index <= maxShardIndex (or all shards
// when unset) in descending order; stops and returns false as soon as the
// visitor returns false.
// NOTE(review): the signature line (DatabaseShardImp::iterateShardsBack) is
// not visible in this chunk.
    std::optional<std::uint32_t> maxShardIndex,
    std::function<bool(Shard& shard)> const& visit)
{
    std::lock_guard lock(mutex_);

    std::map<std::uint32_t, std::shared_ptr<Shard>>::reverse_iterator it, eit;

    if (!maxShardIndex)
        it = shards_.rbegin();
    else
        // upper_bound yields the first index > maxShardIndex; reversing it
        // starts the iteration at the last index <= maxShardIndex.
        it = std::make_reverse_iterator(shards_.upper_bound(*maxShardIndex));

    eit = shards_.rend();

    for (; it != eit; it++)
    {
        if (it->second->getState() == ShardState::finalized &&
            (!maxShardIndex || it->first <= *maxShardIndex))
        {
            if (!visit(*it->second))
                return false;
        }
    }

    return true;
}
2076 
2077 bool
2079  std::optional<std::uint32_t> maxShardIndex,
2080  std::function<bool(soci::session& session, std::uint32_t shardIndex)> const&
2081  callback)
2082 {
2083  return iterateShardsBack(maxShardIndex, [&callback](Shard& shard) -> bool {
2084  return shard.callForLedgerSQL(callback);
2085  });
2086 }
2087 
2088 bool
2090  std::optional<std::uint32_t> maxShardIndex,
2091  std::function<bool(soci::session& session, std::uint32_t shardIndex)> const&
2092  callback)
2093 {
2094  return iterateShardsBack(maxShardIndex, [&callback](Shard& shard) -> bool {
2095  return shard.callForTransactionSQL(callback);
2096  });
2097 }
2098 
2101 {
2102  auto shardInfo{std::make_unique<ShardInfo>()};
2103  for (auto const& [_, shard] : shards_)
2104  {
2105  shardInfo->update(
2106  shard->index(), shard->getState(), shard->getPercentProgress());
2107  }
2108 
2109  for (auto const shardIndex : preparedIndexes_)
2110  shardInfo->update(shardIndex, ShardState::queued, 0);
2111 
2112  return shardInfo;
2113 }
2114 
2115 size_t
2117 {
2118  std::lock_guard lock(mutex_);
2119  return taskQueue_.size();
2120 }
2121 
void
// Broadcast this server's shard coverage to all connected peers via a
// mtPEER_SHARD_INFO_V2 message; suppressed in standalone mode.
// NOTE(review): the signature line (DatabaseShardImp::updatePeers) is not
// visible in this chunk.
{
    if (!app_.config().standalone() &&
        // NOTE(review): the condition continuation is not visible in this
        // chunk (presumably a check of app_.getOPs().getOperatingMode()) —
        // confirm against the full file.
    {
        auto const message{getShardInfo(lock)->makeMessage(app_)};
        app_.overlay().foreach(send_always(std::make_shared<Message>(
            message, protocol::mtPEER_SHARD_INFO_V2)));
    }
}
2133 
2134 //------------------------------------------------------------------------------
2135 
// Factory: create the shard store when a [shard_db] section is configured;
// returns nullptr otherwise.
// NOTE(review): the return type and function name (make_ShardStore) are not
// visible in this chunk.
    Application& app,
    Scheduler& scheduler,
    int readThreads,
    beast::Journal j)
{
    // The shard store is optional. Future changes will require it.
    Section const& section{
        // NOTE(review): the initializer's continuation is not visible in
        // this chunk (presumably fetching the shard database section from
        // app.config()) — confirm against the full file.
    if (section.empty())
        return nullptr;

    return std::make_unique<DatabaseShardImp>(app, scheduler, readThreads, j);
}
2151 
2152 } // namespace NodeStore
2153 } // namespace ripple
beast::Journal::fatal
Stream fatal() const
Definition: Journal.h:339
ripple::NodeStore::DatabaseShardImp::iterateLedgerSQLsForward
bool iterateLedgerSQLsForward(std::optional< std::uint32_t > minShardIndex, std::function< bool(soci::session &session, std::uint32_t shardIndex)> const &callback) override
iterateLedgerSQLsForward Checks out ledger databases for all shards in ascending order starting from ...
Definition: DatabaseShardImp.cpp:2025
ripple::SizedItem::openFinalLimit
@ openFinalLimit
ripple::Section
Holds a collection of configuration values.
Definition: BasicConfig.h:42
ripple::NodeStore::Database::lastLedgerSeq
std::uint32_t lastLedgerSeq(std::uint32_t shardIndex) const noexcept
Calculates the last ledger sequence for a given shard index.
Definition: Database.h:271
ripple::Application
Definition: Application.h:103
ripple::hotUNKNOWN
@ hotUNKNOWN
Definition: NodeObject.h:33
ripple::NodeStore::make_ShardStore
std::unique_ptr< DatabaseShard > make_ShardStore(Application &app, Scheduler &scheduler, int readThreads, beast::Journal j)
Definition: DatabaseShardImp.cpp:2137
ripple::makeSlice
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:240
ripple::NodeStore::DatabaseShardImp::mutex_
std::mutex mutex_
Definition: DatabaseShardImp.h:227
ripple::NodeStore::DatabaseShardImp::app_
Application & app_
Definition: DatabaseShardImp.h:226
ripple::ShardState::complete
@ complete
ripple::DEFAULT_LEDGERS_PER_SHARD
static constexpr std::uint32_t DEFAULT_LEDGERS_PER_SHARD
The number of ledgers in a shard.
Definition: SystemParameters.h:64
ripple::NodeStore::DatabaseShardImp::storeLedger
bool storeLedger(std::shared_ptr< Ledger const > const &srcLedger) override
Store a ledger from a different database.
Definition: DatabaseShardImp.cpp:1114
ripple::NodeStore::Database
Persistency layer for NodeObject.
Definition: Database.h:52
std::string
STL class.
std::shared_ptr< Ledger >
ripple::loadByIndex
std::shared_ptr< Ledger > loadByIndex(std::uint32_t ledgerIndex, Application &app, bool acquire)
Definition: Ledger.cpp:1061
ripple::SizedItem
SizedItem
Definition: Config.h:48
ripple::NodeStore::DatabaseShardImp::shards_
std::map< std::uint32_t, std::shared_ptr< Shard > > shards_
Definition: DatabaseShardImp.h:237
std::exception
STL class.
std::stoul
T stoul(T... args)
ripple::NodeStore::DatabaseShardImp::PathDesignation
PathDesignation
Definition: DatabaseShardImp.h:190
beast::Journal::trace
Stream trace() const
Severity stream access functions.
Definition: Journal.h:309
ripple::Serializer::modData
Blob & modData()
Definition: Serializer.h:178
ripple::NodeStore::DatabaseShardImp::callForLedgerSQLByLedgerSeq
bool callForLedgerSQLByLedgerSeq(LedgerIndex ledgerSeq, std::function< bool(soci::session &session)> const &callback) override
Invoke a callback on the SQLite db holding the corresponding ledger.
Definition: DatabaseShardImp.cpp:1952
ripple::NodeStore::TaskQueue::size
size_t size() const
Return the queue size.
Definition: TaskQueue.cpp:48
std::vector::reserve
T reserve(T... args)
ripple::NodeStore::DatabaseShardImp::removePreShard
void removePreShard(std::uint32_t shardIndex) override
Remove a previously prepared shard index for import.
Definition: DatabaseShardImp.cpp:416
ripple::LedgerMaster::getValidLedgerIndex
LedgerIndex getValidLedgerIndex()
Definition: LedgerMaster.cpp:214
ripple::NodeStore::Database::fdRequired_
int fdRequired_
Definition: Database.h:303
ripple::NodeStore::DatabaseShardImp::fileSz_
std::uint64_t fileSz_
Definition: DatabaseShardImp.h:261
ripple::InboundLedger::Reason::GENERIC
@ GENERIC
std::vector
STL class.
std::set::find
T find(T... args)
ripple::ConfigSection::shardDatabase
static std::string shardDatabase()
Definition: ConfigSections.h:38
ripple::NodeStore::Shard::callForLedgerSQL
bool callForLedgerSQL(std::function< bool(Args... args)> const &callback)
Invoke a callback on the ledger SQLite db.
Definition: Shard.h:225
ripple::NodeStore::DatabaseShardImp::stop
void stop() override
Definition: DatabaseShardImp.cpp:699
std::vector::size
T size(T... args)
ripple::Application::getRelationalDBInterface
virtual RelationalDBInterface & getRelationalDBInterface()=0
ripple::NodeObjectType
NodeObjectType
The types of node objects.
Definition: NodeObject.h:32
ripple::NodeObject::createObject
static std::shared_ptr< NodeObject > createObject(NodeObjectType type, Blob &&data, uint256 const &hash)
Create an object from fields.
Definition: NodeObject.cpp:37
ripple::NodeStore::DatabaseShardImp::taskQueue_
TaskQueue taskQueue_
Definition: DatabaseShardImp.h:234
ripple::NodeStore::DatabaseShardImp::setStored
void setStored(std::shared_ptr< Ledger const > const &ledger) override
Notifies the database that the given ledger has been fully acquired and stored.
Definition: DatabaseShardImp.cpp:630
std::set::emplace
T emplace(T... args)
beast::Journal::warn
Stream warn() const
Definition: Journal.h:327
std::lock_guard
STL class.
ripple::kilobytes
constexpr auto kilobytes(T value) noexcept
Definition: ByteUtilities.h:27
ripple::NetworkOPs::getOperatingMode
virtual OperatingMode getOperatingMode() const =0
ripple::NodeStore::Database::stop
virtual void stop()
Definition: Database.cpp:89
ripple::NodeStore::FetchReport
Contains information about a fetch operation.
Definition: ripple/nodestore/Scheduler.h:32
ripple::NodeStore::DatabaseShardImp::getDatabaseImportSequence
std::optional< std::uint32_t > getDatabaseImportSequence() const override
Returns the first ledger sequence of the shard currently being imported from the NodeStore.
Definition: DatabaseShardImp.cpp:1227
std::function
std::all_of
T all_of(T... args)
ripple::NodeStore::Shard::finalKey
static const uint256 finalKey
Definition: Shard.h:248
ripple::LedgerMaster::walkHashBySeq
std::optional< LedgerHash > walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason)
Walk to a ledger's hash using the skip list.
Definition: LedgerMaster.cpp:1703
ripple::getLimitedNewestLedgerInfo
std::optional< LedgerInfo > getLimitedNewestLedgerInfo(soci::session &session, LedgerIndex ledgerFirstIndex, beast::Journal j)
getLimitedNewestLedgerInfo Returns info of newest ledger from ledgers with sequences greater or equa...
Definition: RelationalDBInterface_nodes.cpp:486
ripple::NodeStore::DatabaseShardImp::importDatabase
void importDatabase(Database &source) override
Import the application local node store.
Definition: DatabaseShardImp.cpp:742
ripple::deserializePrefixedHeader
LedgerInfo deserializePrefixedHeader(Slice data, bool hasHash)
Deserialize a ledger header (prefixed with 4 bytes) from a byte array.
Definition: InboundLedger.cpp:293
ripple::NodeStore::DatabaseShardImp::databaseImporter_
std::thread databaseImporter_
Definition: DatabaseShardImp.h:286
ripple::NodeStore::DatabaseShardImp::openFinalLimit_
const std::uint32_t openFinalLimit_
Definition: DatabaseShardImp.h:267
std::sort
T sort(T... args)
std::shared_ptr::reset
T reset(T... args)
ripple::SHAMapHash
Definition: SHAMapTreeNode.h:48
ripple::NodeStore::DatabaseShardImp::iterateShardsForward
bool iterateShardsForward(std::optional< std::uint32_t > minShardIndex, std::function< bool(Shard &shard)> const &visit)
iterateShardsForward Visits all shards starting from given in ascending order and calls given callbac...
Definition: DatabaseShardImp.cpp:1997
ripple::Application::getOPs
virtual NetworkOPs & getOPs()=0
ripple::NodeStore::DatabaseShardImp::sweep
void sweep() override
Remove expired entries from the positive and negative caches.
Definition: DatabaseShardImp.cpp:1149
ripple::getLimitedOldestLedgerInfo
std::optional< LedgerInfo > getLimitedOldestLedgerInfo(soci::session &session, LedgerIndex ledgerFirstIndex, beast::Journal j)
getLimitedOldestLedgerInfo Returns info of oldest ledger from ledgers with sequences greater or equa...
Definition: RelationalDBInterface_nodes.cpp:474
ripple::Section::values
std::vector< std::string > const & values() const
Returns all the values in the section.
Definition: BasicConfig.h:77
ripple::send_always
Sends a message to all peers.
Definition: predicates.h:31
ripple::NodeStore::DatabaseShardImp::PathDesignation::historical
@ historical
ripple::get_if_exists
bool get_if_exists(Section const &section, std::string const &name, T &v)
Definition: BasicConfig.h:384
ripple::NodeStore::Shard::version
static constexpr std::uint32_t version
Definition: Shard.h:243
ripple::NodeStore::DatabaseShardImp::getDatabaseImportStatus
Json::Value getDatabaseImportStatus() const override
Definition: DatabaseShardImp.cpp:1201
std::vector::push_back
T push_back(T... args)
ripple::NodeStore::DatabaseShardImp::fetchNodeObject
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq, FetchReport &fetchReport) override
Definition: DatabaseShardImp.cpp:1312
ripple::NodeStore::DatabaseShardImp::secondLatestShardIndex_
std::optional< std::uint32_t > secondLatestShardIndex_
Definition: DatabaseShardImp.h:280
ripple::NodeStore::DatabaseShardImp::avgShardFileSz_
std::uint64_t avgShardFileSz_
Definition: DatabaseShardImp.h:264
ripple::NodeStore::DatabaseShardImp::callForTransactionSQLByLedgerSeq
bool callForTransactionSQLByLedgerSeq(LedgerIndex ledgerSeq, std::function< bool(soci::session &session)> const &callback) override
Invoke a callback on the transaction SQLite db for the corresponding ledger.
Definition: DatabaseShardImp.cpp:1974
ripple::base_uint< 256 >
ripple::NodeStore::DatabaseShardImp::updatePeers
void updatePeers(std::lock_guard< std::mutex > const &lock) const
Definition: DatabaseShardImp.cpp:2123
std::sample
T sample(T... args)
ripple::NodeStore::DatabaseShardImp::getPreShards
std::string getPreShards() override
Get shard indexes being imported.
Definition: DatabaseShardImp.cpp:426
ripple::NodeStore::DatabaseShardImp::databaseImportStatus_
std::unique_ptr< DatabaseImportStatus > databaseImportStatus_
Definition: DatabaseShardImp.h:283
ripple::NodeStore::DatabaseShardImp::getWriteLoad
std::int32_t getWriteLoad() const override
Retrieve the estimated number of pending write operations.
Definition: DatabaseShardImp.cpp:1063
std::thread::joinable
T joinable(T... args)
ripple::NodeStore::DatabaseShardImp::findAcquireIndex
std::optional< std::uint32_t > findAcquireIndex(std::uint32_t validLedgerSeq, std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:1331
ripple::NodeStore::Database::firstLedgerSeq
std::uint32_t firstLedgerSeq(std::uint32_t shardIndex) const noexcept
Calculates the first ledger sequence for a given shard index.
Definition: Database.h:257
ripple::Config::reporting
bool reporting() const
Definition: Config.h:291
ripple::OperatingMode::DISCONNECTED
@ DISCONNECTED
not ready to process requests
ripple::NodeStore::DatabaseShardImp::iterateTransactionSQLsBack
bool iterateTransactionSQLsBack(std::optional< std::uint32_t > maxShardIndex, std::function< bool(soci::session &session, std::uint32_t shardIndex)> const &callback) override
iterateTransactionSQLsBack Checks out transaction databases for all shards in descending order starti...
Definition: DatabaseShardImp.cpp:2089
ripple::NodeStore::DatabaseShardImp::chooseHistoricalPath
boost::filesystem::path chooseHistoricalPath(std::lock_guard< std::mutex > const &) const
Definition: DatabaseShardImp.cpp:1838
ripple::rand_int
std::enable_if_t< std::is_integral< Integral >::value &&detail::is_engine< Engine >::value, Integral > rand_int(Engine &engine, Integral min, Integral max)
Return a uniformly distributed random integer.
Definition: ripple/basics/random.h:115
ripple::NodeStore::DatabaseShardImp::sufficientStorage
bool sufficientStorage(std::uint32_t numShards, PathDesignation pathDesignation, std::lock_guard< std::mutex > const &) const
Definition: DatabaseShardImp.cpp:1531
ripple::NodeStore::DatabaseShardImp::init_
bool init_
Definition: DatabaseShardImp.h:228
std::thread
STL class.
Json::objectValue
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:43
ripple::Application::getLedgerMaster
virtual LedgerMaster & getLedgerMaster()=0
ripple::NodeStore::TaskQueue::addTask
void addTask(std::function< void()> task)
Adds a task to the queue.
Definition: TaskQueue.cpp:38
ripple::NodeStore::DatabaseShardImp::callForLedgerSQLByShardIndex
bool callForLedgerSQLByShardIndex(std::uint32_t const shardIndex, std::function< bool(soci::session &session)> const &callback) override
Invoke a callback on the ledger SQLite db for the corresponding shard.
Definition: DatabaseShardImp.cpp:1960
ripple::Config
Definition: Config.h:68
ripple::RelationalDBInterfaceSqlite
Definition: RelationalDBInterfaceSqlite.h:27
ripple::NodeStore::DatabaseShardImp::doImportDatabase
void doImportDatabase()
Definition: DatabaseShardImp.cpp:763
ripple::compare
int compare(base_uint< Bits, Tag > const &a, base_uint< Bits, Tag > const &b)
Definition: base_uint.h:533
std::ofstream
STL class.
ripple::Application::config
virtual Config & config()=0
ripple::NodeStore::DatabaseShardImp::dir_
boost::filesystem::path dir_
Definition: DatabaseShardImp.h:246
ripple::Config::standalone
bool standalone() const
Definition: Config.h:286
ripple::NodeStore::DatabaseShardImp::removeFailedShard
void removeFailedShard(std::shared_ptr< Shard > &shard)
Definition: DatabaseShardImp.cpp:1619
ripple::NodeStore::DatabaseShard
A collection of historical shards.
Definition: DatabaseShard.h:37
std::to_string
T to_string(T... args)
ripple::NodeStore::DatabaseShardImp::getNumTasks
size_t getNumTasks() const override
Returns the number of queued tasks.
Definition: DatabaseShardImp.cpp:2116
ripple::NodeStore::DatabaseShardImp::importShard
bool importShard(std::uint32_t shardIndex, boost::filesystem::path const &srcDir) override
Import a shard from the shard archive handler into the shard database.
Definition: DatabaseShardImp.cpp:444
ripple::default_prng
beast::xor_shift_engine & default_prng()
Return the default random engine.
Definition: ripple/basics/random.h:65
ripple::NodeStore::DatabaseShardImp::store
void store(NodeObjectType type, Blob &&data, uint256 const &hash, std::uint32_t ledgerSeq) override
Definition: DatabaseShardImp.cpp:1080
ripple::NodeStore::TaskQueue::stop
void stop()
Definition: TaskQueue.cpp:32
ripple::NodeStore::DatabaseShardImp::PathDesignation::none
@ none
beast::Journal::error
Stream error() const
Definition: Journal.h:333
ripple::ShardState::finalized
@ finalized
std::set::erase
T erase(T... args)
ripple::NodeStore::DatabaseShardImp::initConfig
bool initConfig(std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:1238
ripple::ConfigSection
Definition: ConfigSections.h:28
ripple::NodeStore::DatabaseShardImp::latestShardIndex_
std::optional< std::uint32_t > latestShardIndex_
Definition: DatabaseShardImp.h:279
beast::Journal
A generic endpoint for log messages.
Definition: Journal.h:58
std::uint32_t
ripple::NodeStore::DatabaseShardImp::acquireIndex_
std::uint32_t acquireIndex_
Definition: DatabaseShardImp.h:243
ripple::Overlay::foreach
void foreach(Function f) const
Visit every active peer.
Definition: Overlay.h:198
std::map
STL class.
ripple::NodeStore::Scheduler
Scheduling for asynchronous backend activity.
Definition: ripple/nodestore/Scheduler.h:60
ripple::NodeStore::Database::fetchNodeObject
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq=0, FetchType fetchType=FetchType::synchronous)
Fetch a node object.
Definition: Database.cpp:158
std::transform
T transform(T... args)
ripple::NodeStore::Database::storeStats
void storeStats(std::uint64_t count, std::uint64_t sz)
Definition: Database.h:328
ripple::NodeStore::DatabaseShardImp::preparedIndexes_
std::set< std::uint32_t > preparedIndexes_
Definition: DatabaseShardImp.h:240
ripple::NodeStore::DatabaseShardImp::init
bool init() override
Initialize the database.
Definition: DatabaseShardImp.cpp:70
std::weak_ptr
STL class.
ripple::NodeStore::Database::isStopping
bool isStopping() const
Definition: Database.cpp:69
ripple::Serializer
Definition: Serializer.h:39
ripple::NodeStore::DatabaseShardImp::historicalPaths_
std::vector< boost::filesystem::path > historicalPaths_
Definition: DatabaseShardImp.h:258
std::vector::emplace_back
T emplace_back(T... args)
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::Serializer::addBitString
int addBitString(base_uint< Bits, Tag > const &v)
Definition: Serializer.h:97
ripple::Application::getNodeStore
virtual NodeStore::Database & getNodeStore()=0
ripple::NodeStore::DatabaseShardImp::checkHistoricalPaths
bool checkHistoricalPaths(std::lock_guard< std::mutex > const &) const
Definition: DatabaseShardImp.cpp:1871
ripple::NodeStore::DatabaseShardImp::maxHistoricalShards_
std::uint32_t maxHistoricalShards_
Definition: DatabaseShardImp.h:255
ripple::NodeStore::Shard::callForTransactionSQL
bool callForTransactionSQL(std::function< bool(Args... args)> const &callback)
Invoke a callback on the transaction SQLite db.
Definition: Shard.h:237
ripple::ShardState::acquire
@ acquire
ripple::Application::getShardFamily
virtual Family * getShardFamily()=0
ripple::NodeStore::Database::j_
const beast::Journal j_
Definition: Database.h:301
ripple::NodeStore::DatabaseShardImp::callForTransactionSQLByShardIndex
bool callForTransactionSQLByShardIndex(std::uint32_t const shardIndex, std::function< bool(soci::session &session)> const &callback) override
Invoke a callback on the transaction SQLite db for the corresponding shard.
Definition: DatabaseShardImp.cpp:1983
ripple::NodeStore::DatabaseShardImp::fetchLedger
std::shared_ptr< Ledger > fetchLedger(uint256 const &hash, std::uint32_t ledgerSeq) override
Fetch a ledger from the shard store.
Definition: DatabaseShardImp.cpp:552
std::vector::begin
T begin(T... args)
ripple::NodeStore::Database::seqToShardIndex
std::uint32_t seqToShardIndex(std::uint32_t ledgerSeq) const noexcept
Calculates the shard index for a given ledger sequence.
Definition: Database.h:283
std
STL namespace.
ripple::XRP_LEDGER_EARLIEST_SEQ
static constexpr std::uint32_t XRP_LEDGER_EARLIEST_SEQ
The XRP ledger network's earliest allowed sequence.
Definition: SystemParameters.h:61
ripple::NodeStore::DatabaseShardImp::iterateShardsBack
bool iterateShardsBack(std::optional< std::uint32_t > maxShardIndex, std::function< bool(Shard &shard)> const &visit)
iterateShardsBack Visits all shards starting from given in descending order and calls given callback ...
Definition: DatabaseShardImp.cpp:2049
ripple::NodeStore::DatabaseShardImp::numHistoricalShards
std::uint32_t numHistoricalShards(std::lock_guard< std::mutex > const &lock) const
Definition: DatabaseShardImp.cpp:1659
ripple::LedgerMaster::getCurrentLedgerIndex
LedgerIndex getCurrentLedgerIndex()
Definition: LedgerMaster.cpp:208
ripple::NodeStore::DatabaseShardImp::relocateOutdatedShards
void relocateOutdatedShards(std::lock_guard< std::mutex > const &lock)
Definition: DatabaseShardImp.cpp:1670
ripple::NodeStore::DatabaseShardImp::iterateLedgerSQLsBack
bool iterateLedgerSQLsBack(std::optional< std::uint32_t > maxShardIndex, std::function< bool(soci::session &session, std::uint32_t shardIndex)> const &callback) override
iterateLedgerSQLsBack Checks out ledger databases for all shards in descending order starting from gi...
Definition: DatabaseShardImp.cpp:2078
ripple::NodeStore::DatabaseShardImp::updateFileStats
void updateFileStats()
Definition: DatabaseShardImp.cpp:1470
ripple::NodeStore::Database::earliestLedgerSeq_
const std::uint32_t earliestLedgerSeq_
Definition: Database.h:322
ripple::Application::overlay
virtual Overlay & overlay()=0
ripple::NodeStore::DatabaseShardImp::shardBoundaryIndex
std::uint32_t shardBoundaryIndex() const
Definition: DatabaseShardImp.cpp:1643
std::count_if
T count_if(T... args)
std::vector::empty
T empty(T... args)
ripple::NodeStore::DatabaseShardImp::prepareShards
bool prepareShards(std::vector< std::uint32_t > const &shardIndexes) override
Prepare one or more shard indexes to be imported into the database.
Definition: DatabaseShardImp.cpp:299
std::unique
T unique(T... args)
std::optional< std::uint32_t >
beast::Journal::debug
Stream debug() const
Definition: Journal.h:315
ripple::NodeStore::Database::earliestShardIndex_
const std::uint32_t earliestShardIndex_
Definition: Database.h:325
ripple::to_string
std::string to_string(Manifest const &m)
Format the specified manifest to a string for debugging purposes.
Definition: app/misc/impl/Manifest.cpp:38
ripple::NodeStore::DatabaseShardImp::setStoredInShard
bool setStoredInShard(std::shared_ptr< Shard > &shard, std::shared_ptr< Ledger const > const &ledger)
Definition: DatabaseShardImp.cpp:1586
ripple::NodeStore::DatabaseShardImp::canAdd_
bool canAdd_
Definition: DatabaseShardImp.h:249
ripple::Serializer::add32
int add32(std::uint32_t i)
Definition: Serializer.cpp:38
std::vector::end
T end(T... args)
ripple::NodeStore::Database::scheduler_
Scheduler & scheduler_
Definition: Database.h:302
ripple::RangeSet
boost::icl::interval_set< T, std::less, ClosedInterval< T > > RangeSet
A set of closed intervals over the domain T.
Definition: RangeSet.h:69
ripple::NodeStore::Database::earliestLedgerSeq
std::uint32_t earliestLedgerSeq() const noexcept
Definition: Database.h:238
ripple::NodeStore::DatabaseShardImp::finalizeShard
void finalizeShard(std::shared_ptr< Shard > &shard, bool writeSQLite, std::optional< uint256 > const &expectedHash)
Definition: DatabaseShardImp.cpp:1396
std::max
T max(T... args)
ripple::RelationalDBInterface::getHashesByIndex
virtual std::optional< LedgerHashPair > getHashesByIndex(LedgerIndex ledgerIndex)=0
getHashesByIndex Returns hash of the ledger and hash of parent ledger for the ledger of given sequenc...
ripple::NodeStore::Shard
Definition: Shard.h:53
ripple::NodeStore::Database::maxLedgers
std::uint32_t maxLedgers(std::uint32_t shardIndex) const noexcept
Calculates the maximum ledgers for a given shard index.
Definition: Database.cpp:76
ripple::NodeStore::DatabaseShardImp::DatabaseShardImp
DatabaseShardImp()=delete
std::make_reverse_iterator
T make_reverse_iterator(T... args)
std::unique_ptr
STL class.
ripple::loadLedgerHelper
std::shared_ptr< Ledger > loadLedgerHelper(LedgerInfo const &info, Application &app, bool acquire)
Definition: Ledger.cpp:1017
ripple::NodeStore::DatabaseShardImp::databaseImportMarker_
static constexpr auto databaseImportMarker_
Definition: DatabaseShardImp.h:270
std::unordered_map
STL class.
ripple::PublisherStatus::available
@ available
ripple::NodeStore::DatabaseShardImp::prepareForNewShard
std::optional< PathDesignation > prepareForNewShard(std::uint32_t shardIndex, std::uint32_t numHistoricalShards, std::lock_guard< std::mutex > const &lock)
Definition: DatabaseShardImp.cpp:1807
ripple::ConfigSection::nodeDatabase
static std::string nodeDatabase()
Definition: ConfigSections.h:33
std::thread::join
T join(T... args)
std::exception::what
T what(T... args)
ripple::ShardState::queued
@ queued
ripple::NodeStore::DatabaseShardImp::iterateTransactionSQLsForward
bool iterateTransactionSQLsForward(std::optional< std::uint32_t > minShardIndex, std::function< bool(soci::session &session, std::uint32_t shardIndex)> const &callback) override
iterateTransactionSQLsForward Checks out transaction databases for all shards in ascending order star...
Definition: DatabaseShardImp.cpp:2037
ripple::HashPrefix::shardInfo
@ shardInfo
shard info for signing
Json::Value
Represents a JSON value.
Definition: json_value.h:145
ripple::NodeStore::DatabaseShardImp::prepareLedger
std::optional< std::uint32_t > prepareLedger(std::uint32_t validLedgerSeq) override
Prepare to store a new ledger in the shard being acquired.
Definition: DatabaseShardImp.cpp:229
ripple::get
T & get(EitherAmount &amt)
Definition: AmountSpec.h:118
ripple::NodeStore::DatabaseShardImp::ctx_
std::unique_ptr< nudb::context > ctx_
Definition: DatabaseShardImp.h:231
ripple::BasicConfig::section
Section & section(std::string const &name)
Returns the section with the given name.
Definition: BasicConfig.cpp:127
ripple::NodeStore::DatabaseShardImp::backendName_
std::string backendName_
Definition: DatabaseShardImp.h:252
ripple::NodeStore::DatabaseShardImp::getShardInfo
std::unique_ptr< ShardInfo > getShardInfo() const override
Query information about shards held.
Definition: DatabaseShardImp.cpp:692