rippled
DatabaseShardImp.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2017 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/app/ledger/InboundLedgers.h>
21 #include <ripple/app/ledger/LedgerMaster.h>
22 #include <ripple/app/misc/NetworkOPs.h>
23 #include <ripple/app/rdb/backend/RelationalDBInterfaceSqlite.h>
24 #include <ripple/basics/ByteUtilities.h>
25 #include <ripple/basics/RangeSet.h>
26 #include <ripple/basics/chrono.h>
27 #include <ripple/basics/random.h>
28 #include <ripple/core/ConfigSections.h>
29 #include <ripple/nodestore/DummyScheduler.h>
30 #include <ripple/nodestore/impl/DatabaseShardImp.h>
31 #include <ripple/overlay/Overlay.h>
32 #include <ripple/overlay/predicates.h>
33 #include <ripple/protocol/HashPrefix.h>
34 #include <ripple/protocol/digest.h>
35 
36 #include <boost/algorithm/string/predicate.hpp>
37 
38 #if BOOST_OS_LINUX
39 #include <sys/statvfs.h>
40 #endif
41 
42 namespace ripple {
43 
44 namespace NodeStore {
45 
// Constructor: wires the shard database into the application.
// NOTE(review): this view is missing original lines 46 and 50 (the
// `DatabaseShardImp::DatabaseShardImp(` signature line and the trailing
// `beast::Journal j` parameter) — confirm against the full file.
47  Application& app,
48  Scheduler& scheduler,
49  int readThreads,
51  : DatabaseShard(
52  scheduler,
53  readThreads,
54  app.config().section(ConfigSection::shardDatabase()),
55  j)
56  , app_(app)
// Estimated on-disk size of one complete shard: ledgers per shard
// times 192 KiB each.
57  , avgShardFileSz_(ledgersPerShard_ * kilobytes(192ull))
58  , openFinalLimit_(
59  app.config().getValueFor(SizedItem::openFinalLimit, std::nullopt))
60 {
// Shards are unsupported in reporting mode; refuse to construct.
61  if (app.config().reporting())
62  {
63  Throw<std::runtime_error>(
64  "Attempted to create DatabaseShardImp in reporting mode. Reporting "
65  "does not support shards. Remove shards info from config");
66  }
67 }
68 
// One-time initialization: validates configuration, creates/validates the
// storage directories, then scans all paths for existing shard directories
// and loads them into shards_. Returns false on any configuration or
// filesystem error. NOTE(review): this view is missing original line 70
// (`DatabaseShardImp::init()`) and a few interior lines (113, 151, 180,
// 186-187) — confirm against the full file.
69 bool
71 {
72  {
73  std::lock_guard lock(mutex_);
// Guard against double initialization.
74  if (init_)
75  {
76  JLOG(j_.error()) << "already initialized";
77  return false;
78  }
79 
80  if (!initConfig(lock))
81  {
82  JLOG(j_.error()) << "invalid configuration file settings";
83  return false;
84  }
85 
86  try
87  {
88  using namespace boost::filesystem;
89 
90  // Consolidate the main storage path and all historical paths
91  std::vector<path> paths{dir_};
92  paths.insert(
93  paths.end(), historicalPaths_.begin(), historicalPaths_.end());
94 
// Every configured path must exist (or be creatable) as a directory.
95  for (auto const& path : paths)
96  {
97  if (exists(path))
98  {
99  if (!is_directory(path))
100  {
101  JLOG(j_.error()) << path << " must be a directory";
102  return false;
103  }
104  }
105  else if (!create_directories(path))
106  {
107  JLOG(j_.error())
108  << "failed to create path: " + path.string();
109  return false;
110  }
111  }
112 
// NOTE(review): original line 113 (the condition guarding this block,
// presumably `if (!app_.config().standalone() && ...)`) is missing here.
114  {
115  // Check historical paths for duplicated file systems
116  if (!checkHistoricalPaths(lock))
117  return false;
118  }
119 
// Shared NuDB context used by all shards.
120  ctx_ = std::make_unique<nudb::context>();
121  ctx_->start();
122 
123  // Find shards
124  std::uint32_t openFinals{0};
125  for (auto const& path : paths)
126  {
127  for (auto const& it : directory_iterator(path))
128  {
129  // Ignore files
130  if (!is_directory(it))
131  continue;
132 
133  // Ignore nonnumerical directory names
134  auto const shardDir{it.path()};
135  auto dirName{shardDir.stem().string()};
136  if (!std::all_of(
137  dirName.begin(), dirName.end(), [](auto c) {
138  return ::isdigit(static_cast<unsigned char>(c));
139  }))
140  {
141  continue;
142  }
143 
144  // Ignore values below the earliest shard index
145  auto const shardIndex{std::stoul(dirName)};
146  if (shardIndex < earliestShardIndex_)
147  {
148  JLOG(j_.debug())
149  << "shard " << shardIndex
150  << " ignored, comes before earliest shard index "
// NOTE(review): original line 151 (streaming earliestShardIndex_) is
// missing from this view.
152  continue;
153  }
154 
155  // Check if a previous database import failed
156  if (is_regular_file(shardDir / databaseImportMarker_))
157  {
158  JLOG(j_.warn())
159  << "shard " << shardIndex
160  << " previously failed database import, removing";
161  remove_all(shardDir);
162  continue;
163  }
164 
165  auto shard{std::make_shared<Shard>(
166  app_, *this, shardIndex, shardDir.parent_path(), j_)};
167  if (!shard->init(scheduler_, *ctx_))
168  {
169  // Remove corrupted or legacy shard
170  shard->removeOnDestroy();
171  JLOG(j_.warn())
172  << "shard " << shardIndex << " removed, "
173  << (shard->isLegacy() ? "legacy" : "corrupted")
174  << " shard";
175  continue;
176  }
177 
// Route the loaded shard by its recovered state.
// NOTE(review): the `case` labels at original lines 180 and 186-187
// (presumably ShardState::finalized and ShardState::complete plus a
// finalizeShard( call) are missing from this view.
178  switch (shard->getState())
179  {
// Keep at most openFinalLimit_ finalized shards open.
181  if (++openFinals > openFinalLimit_)
182  shard->tryClose();
183  shards_.emplace(shardIndex, std::move(shard));
184  break;
185 
188  shards_.emplace(shardIndex, std::move(shard))
189  .first->second,
190  true,
191  std::nullopt);
192  break;
193 
194  case ShardState::acquire:
// Only a single shard may be in the acquire state at once.
195  if (acquireIndex_ != 0)
196  {
197  JLOG(j_.error())
198  << "more than one shard being acquired";
199  return false;
200  }
201 
202  shards_.emplace(shardIndex, std::move(shard));
203  acquireIndex_ = shardIndex;
204  break;
205 
206  default:
207  JLOG(j_.error())
208  << "shard " << shardIndex << " invalid state";
209  return false;
210  }
211  }
212  }
213  }
214  catch (std::exception const& e)
215  {
216  JLOG(j_.fatal()) << "Exception caught in function " << __func__
217  << ". Error: " << e.what();
218  return false;
219  }
220 
221  init_ = true;
222  }
223 
// Refresh aggregate file statistics outside the lock.
224  updateFileStats();
225  return true;
226 }
227 
// Prepares the next ledger sequence to acquire. If a shard is already being
// acquired, delegates to it; otherwise selects a new shard index, creates
// the shard (on a historical path if required), and registers it as the
// acquiring shard. Returns std::nullopt when nothing can be acquired.
// NOTE(review): original lines 227-229 (return type and
// `prepareLedger(std::uint32_t validLedgerSeq)` signature) are missing
// from this view.
230 {
231  std::optional<std::uint32_t> shardIndex;
232 
233  {
234  std::lock_guard lock(mutex_);
235  assert(init_);
236 
// An acquire already in progress: ask that shard for its next sequence.
237  if (acquireIndex_ != 0)
238  {
239  if (auto const it{shards_.find(acquireIndex_)}; it != shards_.end())
240  return it->second->prepare();
241 
242  // Should never get here
243  assert(false);
244  return std::nullopt;
245  }
246 
247  if (!canAdd_)
248  return std::nullopt;
249 
250  shardIndex = findAcquireIndex(validLedgerSeq, lock);
251  }
252 
// No candidate index: remember that nothing more can be added.
253  if (!shardIndex)
254  {
255  JLOG(j_.debug()) << "no new shards to add";
256  {
257  std::lock_guard lock(mutex_);
258  canAdd_ = false;
259  }
260  return std::nullopt;
261  }
262 
263  auto const pathDesignation = [this, shardIndex = *shardIndex]() {
264  std::lock_guard lock(mutex_);
265  return prepareForNewShard(shardIndex, numHistoricalShards(lock), lock);
266  }();
267 
268  if (!pathDesignation)
269  return std::nullopt;
270 
271  auto const needsHistoricalPath =
272  *pathDesignation == PathDesignation::historical;
273 
// Construct the shard under the lock; historical shards live on one of the
// configured historical paths, recent shards on the main path.
274  auto shard = [this, shardIndex, needsHistoricalPath] {
275  std::lock_guard lock(mutex_);
276  return std::make_unique<Shard>(
277  app_,
278  *this,
279  *shardIndex,
280  (needsHistoricalPath ? chooseHistoricalPath(lock) : ""),
281  j_);
282  }();
283 
284  if (!shard->init(scheduler_, *ctx_))
285  return std::nullopt;
286 
287  auto const ledgerSeq{shard->prepare()};
288  {
289  std::lock_guard lock(mutex_);
290  shards_.emplace(*shardIndex, std::move(shard));
291  acquireIndex_ = *shardIndex;
// Advertise the updated shard set to peers.
292  updatePeers(lock);
293  }
294 
295  return ledgerSeq;
296 }
297 
// Validates and reserves a set of shard indexes for import by the shard
// archive handler. All indexes must be valid, not already stored, not
// already queued, and there must be sufficient shard-count headroom and
// disk space. On success the indexes are added to preparedIndexes_ and
// peers are notified. NOTE(review): original lines 299 (the
// `prepareShards(std::vector<std::uint32_t> const& shardIndexes)`
// signature), 306 (`std::transform(`), 340, 355 and 368 are missing from
// this view.
298 bool
300 {
// Shared failure helper: logs either "shard N" or a comma-joined list of
// all requested indexes, then returns false.
301  auto fail = [j = j_, &shardIndexes](
302  std::string const& msg,
303  std::optional<std::uint32_t> shardIndex = std::nullopt) {
304  auto multipleIndexPrequel = [&shardIndexes] {
305  std::vector<std::string> indexesAsString(shardIndexes.size());
307  shardIndexes.begin(),
308  shardIndexes.end(),
309  indexesAsString.begin(),
310  [](uint32_t const index) { return std::to_string(index); });
311 
312  return std::string("shard") +
313  (shardIndexes.size() > 1 ? "s " : " ") +
314  boost::algorithm::join(indexesAsString, ", ");
315  };
316 
317  JLOG(j.error()) << (shardIndex ? "shard " + std::to_string(*shardIndex)
318  : multipleIndexPrequel())
319  << " " << msg;
320  return false;
321  };
322 
323  if (shardIndexes.empty())
324  return fail("invalid shard indexes");
325 
326  std::lock_guard lock(mutex_);
327  assert(init_);
328 
329  if (!canAdd_)
330  return fail("cannot be stored at this time");
331 
332  auto historicalShardsToPrepare = 0;
333 
334  for (auto const shardIndex : shardIndexes)
335  {
336  if (shardIndex < earliestShardIndex_)
337  {
338  return fail(
339  "comes before earliest shard index " +
341  shardIndex);
342  }
343 
344  // If we are synced to the network, check if the shard index is
345  // greater or equal to the current or validated shard index.
346  auto seqCheck = [&](std::uint32_t ledgerSeq) {
347  if (ledgerSeq >= earliestLedgerSeq_ &&
348  shardIndex >= seqToShardIndex(ledgerSeq))
349  {
350  return fail("invalid index", shardIndex);
351  }
352  return true;
353  };
// NOTE(review): original line 355 (the second seqCheck call, presumably
// against the current ledger index) is missing from this view.
354  if (!seqCheck(app_.getLedgerMaster().getValidLedgerIndex() + 1) ||
356  {
357  return fail("invalid index", shardIndex);
358  }
359 
360  if (shards_.find(shardIndex) != shards_.end())
361  return fail("is already stored", shardIndex);
362 
363  if (preparedIndexes_.find(shardIndex) != preparedIndexes_.end())
364  return fail(
365  "is already queued for import from the shard archive handler",
366  shardIndex);
367 
// Reject indexes currently being imported from the node store.
369  {
370  if (auto shard = databaseImportStatus_->currentShard.lock(); shard)
371  {
372  if (shard->index() == shardIndex)
373  return fail(
374  "is being imported from the nodestore", shardIndex);
375  }
376  }
377 
378  // Any shard earlier than the two most recent shards
379  // is a historical shard
380  if (shardIndex < shardBoundaryIndex())
381  ++historicalShardsToPrepare;
382  }
383 
384  auto const numHistShards = numHistoricalShards(lock);
385 
386  // Check shard count and available storage space
387  if (numHistShards + historicalShardsToPrepare > maxHistoricalShards_)
388  return fail("maximum number of historical shards reached");
389 
390  if (historicalShardsToPrepare)
391  {
392  // Check available storage space for historical shards
393  if (!sufficientStorage(
394  historicalShardsToPrepare, PathDesignation::historical, lock))
395  return fail("insufficient storage space available");
396  }
397 
398  if (auto const recentShardsToPrepare =
399  shardIndexes.size() - historicalShardsToPrepare;
400  recentShardsToPrepare)
401  {
402  // Check available storage space for recent shards
403  if (!sufficientStorage(
404  recentShardsToPrepare, PathDesignation::none, lock))
405  return fail("insufficient storage space available");
406  }
407 
// All checks passed: reserve every requested index.
408  for (auto const shardIndex : shardIndexes)
409  preparedIndexes_.emplace(shardIndex);
410 
411  updatePeers(lock);
412  return true;
413 }
414 
// Removes a shard index previously reserved via prepareShards; notifies
// peers only if the index was actually queued. NOTE(review): original line
// 416 (`DatabaseShardImp::removePreShard(std::uint32_t shardIndex)`) is
// missing from this view.
415 void
417 {
418  std::lock_guard lock(mutex_);
419  assert(init_);
420 
421  if (preparedIndexes_.erase(shardIndex))
422  updatePeers(lock);
423 }
424 
// Returns the set of shard indexes queued for import, rendered as a range
// string (e.g. "1-3,5"), or an empty string when none are queued.
// NOTE(review): original lines 425-426 (return type and signature) and 428
// (presumably `RangeSet<std::uint32_t> rs;`) are missing from this view.
427 {
429  {
430  std::lock_guard lock(mutex_);
431  assert(init_);
432 
433  for (auto const& shardIndex : preparedIndexes_)
434  rs.insert(shardIndex);
435  }
436 
437  if (rs.empty())
438  return {};
439 
440  return ripple::to_string(rs);
441 };
442 
// Imports a complete shard from srcDir (produced by the shard archive
// handler): validates the source, moves it into the proper shard path,
// opens it, and schedules finalization against the expected last-ledger
// hash. On any failure the reserved index is released (and the directory
// renamed back if it had been moved). NOTE(review): original lines 444
// (`DatabaseShardImp::importShard(`), 465, 473, 477 and 513 (lock
// arguments / walkHashBySeq arguments) are missing from this view.
443 bool
445  std::uint32_t shardIndex,
446  boost::filesystem::path const& srcDir)
447 {
// Failure helper: logs, un-reserves the index so the import can be
// retried, and notifies peers. Must be called with mutex_ held.
448  auto fail = [&](std::string const& msg,
449  std::lock_guard<std::mutex> const& lock) {
450  JLOG(j_.error()) << "shard " << shardIndex << " " << msg;
451 
452  // Remove the failed import shard index so it can be retried
453  preparedIndexes_.erase(shardIndex);
454  updatePeers(lock);
455  return false;
456  };
457 
458  using namespace boost::filesystem;
459  try
460  {
461  if (!is_directory(srcDir) || is_empty(srcDir))
462  {
463  return fail(
464  "invalid source directory " + srcDir.string(),
466  }
467  }
468  catch (std::exception const& e)
469  {
470  return fail(
471  std::string(". Exception caught in function ") + __func__ +
472  ". Error: " + e.what(),
474  }
475 
// Hash of the shard's last ledger, used later to verify the import.
476  auto const expectedHash{app_.getLedgerMaster().walkHashBySeq(
478  if (!expectedHash)
479  return fail("expected hash not found", std::lock_guard(mutex_));
480 
481  path dstDir;
482  {
483  std::lock_guard lock(mutex_);
484  if (shards_.find(shardIndex) != shards_.end())
485  return fail("already exists", lock);
486 
487  // Check shard was prepared for import
488  if (preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
489  return fail("was not prepared for import", lock);
490 
491  auto const pathDesignation{
492  prepareForNewShard(shardIndex, numHistoricalShards(lock), lock)};
493  if (!pathDesignation)
494  return fail("failed to import", lock);
495 
496  if (*pathDesignation == PathDesignation::historical)
497  dstDir = chooseHistoricalPath(lock);
498  else
499  dstDir = dir_;
500  }
501  dstDir /= std::to_string(shardIndex);
502 
// Move helper; also used to roll the move back on later failures.
503  auto renameDir = [&, fname = __func__](path const& src, path const& dst) {
504  try
505  {
506  rename(src, dst);
507  }
508  catch (std::exception const& e)
509  {
510  return fail(
511  std::string(". Exception caught in function ") + fname +
512  ". Error: " + e.what(),
514  }
515  return true;
516  };
517 
518  // Rename source directory to the shard database directory
519  if (!renameDir(srcDir, dstDir))
520  return false;
521 
522  // Create the new shard
523  auto shard{std::make_unique<Shard>(
524  app_, *this, shardIndex, dstDir.parent_path(), j_)};
525 
526  if (!shard->init(scheduler_, *ctx_) ||
527  shard->getState() != ShardState::complete)
528  {
// Roll back: close the shard before moving its directory back.
529  shard.reset();
530  renameDir(dstDir, srcDir);
531  return fail("failed to import", std::lock_guard(mutex_));
532  }
533 
534  auto const [it, inserted] = [&]() {
535  std::lock_guard lock(mutex_);
536  preparedIndexes_.erase(shardIndex);
537  return shards_.emplace(shardIndex, std::move(shard));
538  }();
539 
540  if (!inserted)
541  {
542  shard.reset();
543  renameDir(dstDir, srcDir);
544  return fail("failed to import", std::lock_guard(mutex_));
545  }
546 
// Verify and finalize against the expected last-ledger hash.
547  finalizeShard(it->second, true, expectedHash);
548  return true;
549 }
550 
// Fetches and reconstructs a full ledger (header plus state/tx map roots)
// from the shard that holds ledgerSeq. Returns nullptr when the shard is
// absent, in the wrong state, or the stored data fails validation.
// NOTE(review): original lines 551-552 (return type and signature), 556
// (shard declaration) and 570 (a `case` label, presumably
// ShardState::finalized) are missing from this view.
553 {
554  auto const shardIndex{seqToShardIndex(ledgerSeq)};
555  {
557  {
558  std::lock_guard lock(mutex_);
559  assert(init_);
560 
561  auto const it{shards_.find(shardIndex)};
562  if (it == shards_.end())
563  return nullptr;
564  shard = it->second;
565  }
566 
567  // Ledger must be stored in a final or acquiring shard
568  switch (shard->getState())
569  {
571  break;
572  case ShardState::acquire:
573  if (shard->containsLedger(ledgerSeq))
574  break;
575  [[fallthrough]];
576  default:
577  return nullptr;
578  }
579  }
580 
// The ledger header is keyed by its hash in the node store.
581  auto const nodeObject{Database::fetchNodeObject(hash, ledgerSeq)};
582  if (!nodeObject)
583  return nullptr;
584 
585  auto fail = [&](std::string const& msg) -> std::shared_ptr<Ledger> {
586  JLOG(j_.error()) << "shard " << shardIndex << " " << msg;
587  return nullptr;
588  };
589 
590  auto ledger{std::make_shared<Ledger>(
591  deserializePrefixedHeader(makeSlice(nodeObject->getData())),
592  app_.config(),
593  *app_.getShardFamily())};
594 
// Sanity-check that the deserialized header matches the request.
595  if (ledger->info().seq != ledgerSeq)
596  {
597  return fail(
598  "encountered invalid ledger sequence " + std::to_string(ledgerSeq));
599  }
600  if (ledger->info().hash != hash)
601  {
602  return fail(
603  "encountered invalid ledger hash " + to_string(hash) +
604  " on sequence " + std::to_string(ledgerSeq));
605  }
606 
607  ledger->setFull();
// Both SHAMap roots must be retrievable for the ledger to be usable.
608  if (!ledger->stateMap().fetchRoot(
609  SHAMapHash{ledger->info().accountHash}, nullptr))
610  {
611  return fail(
612  "is missing root STATE node on hash " + to_string(hash) +
613  " on sequence " + std::to_string(ledgerSeq));
614  }
615 
616  if (ledger->info().txHash.isNonZero())
617  {
618  if (!ledger->txMap().fetchRoot(
619  SHAMapHash{ledger->info().txHash}, nullptr))
620  {
621  return fail(
622  "is missing root TXN node on hash " + to_string(hash) +
623  " on sequence " + std::to_string(ledgerSeq))
624  }
625  }
626  return ledger;
627 }
628 
// Marks a fully-stored ledger as complete within the acquiring shard after
// validating the ledger's hashes and SHAMaps. No-ops (with logging) if the
// ledger's shard is not the one being acquired or already contains the
// ledger. NOTE(review): original lines 630 (signature) and 660 (shard
// declaration) are missing from this view.
629 void
631 {
632  auto const ledgerSeq{ledger->info().seq};
// Reject ledgers with obviously invalid hashes or maps before touching
// any shard state.
633  if (ledger->info().hash.isZero())
634  {
635  JLOG(j_.error()) << "zero ledger hash for ledger sequence "
636  << ledgerSeq;
637  return;
638  }
639  if (ledger->info().accountHash.isZero())
640  {
641  JLOG(j_.error()) << "zero account hash for ledger sequence "
642  << ledgerSeq;
643  return;
644  }
645  if (ledger->stateMap().getHash().isNonZero() &&
646  !ledger->stateMap().isValid())
647  {
648  JLOG(j_.error()) << "invalid state map for ledger sequence "
649  << ledgerSeq;
650  return;
651  }
652  if (ledger->info().txHash.isNonZero() && !ledger->txMap().isValid())
653  {
654  JLOG(j_.error()) << "invalid transaction map for ledger sequence "
655  << ledgerSeq;
656  return;
657  }
658 
659  auto const shardIndex{seqToShardIndex(ledgerSeq)};
661  {
662  std::lock_guard lock(mutex_);
663  assert(init_);
664 
665  if (shardIndex != acquireIndex_)
666  {
667  JLOG(j_.trace())
668  << "shard " << shardIndex << " is not being acquired";
669  return;
670  }
671 
672  auto const it{shards_.find(shardIndex)};
673  if (it == shards_.end())
674  {
675  JLOG(j_.error())
676  << "shard " << shardIndex << " is not being acquired";
677  return;
678  }
679  shard = it->second;
680  }
681 
682  if (shard->containsLedger(ledgerSeq))
683  {
684  JLOG(j_.trace()) << "shard " << shardIndex << " ledger already stored";
685  return;
686  }
687 
688  setStoredInShard(shard, ledger);
689 }
690 
// Public accessor: takes the lock and delegates to the locked
// getShardInfo(lock) overload. NOTE(review): original lines 691-692
// (return type and signature) are missing from this view.
693 {
694  std::lock_guard lock(mutex_);
695  return getShardInfo(lock);
696 }
697 
// Orderly shutdown: stops the base Database (read threads), stops and
// releases every shard, stops the task queue, and signals any in-progress
// node store import shard to stop. NOTE(review): original lines 699
// (signature), 703 (the weak-pointer vector declaration), 727 (the
// databaseImportStatus_ guard) and 737-738 (joining the import thread) are
// missing from this view.
698 void
700 {
701  // Stop read threads in base before data members are destroyed
702  Database::stop();
704  {
705  std::lock_guard lock(mutex_);
706  shards.reserve(shards_.size());
// Keep weak references so we can verify the shards actually expire
// after shards_ releases its strong references.
707  for (auto const& [_, shard] : shards_)
708  {
709  shards.push_back(shard);
710  shard->stop();
711  }
712  shards_.clear();
713  }
714  taskQueue_.stop();
715 
716  // All shards should be expired at this point
717  for (auto const& wptr : shards)
718  {
719  if (auto const shard{wptr.lock()})
720  {
721  JLOG(j_.warn()) << " shard " << shard->index() << " unexpired";
722  }
723  }
724 
725  // Notify the shard being imported
726  // from the node store to stop
728  {
729  // A node store import is in progress
730  if (auto importShard = databaseImportStatus_->currentShard.lock();
731  importShard)
732  importShard->stop();
733  }
734 
735  // Wait for the node store import thread
736  // if necessary
739 }
740 
// Kicks off the node store -> shard store import. Validates that the
// source is the application's own node store and that no import is already
// running, then launches the import on a dedicated background thread.
// NOTE(review): original lines 742 (signature with the `Database& source`
// parameter), 750 (the already-in-progress condition) and 759 (the thread
// launch, presumably starting doImportDatabase) are missing from this view.
741 void
743 {
744  std::lock_guard lock(mutex_);
745  assert(init_);
746 
747  // Only the application local node store can be imported
748  assert(&source == &app_.getNodeStore());
749 
751  {
752  assert(false);
753  JLOG(j_.error()) << "database import already in progress";
754  return;
755  }
756 
757  // Run the lengthy node store import process in the background
758  // on a dedicated thread.
760 }
761 
// Background worker for importDatabase: determines the range of complete
// shards representable by the ledgers in the SQLite/node store databases,
// then for each candidate shard verifies the ledger hashes exist in the
// node store, copies the ledgers into a new shard, writes the shard final
// key, and finalizes it. Checks isStopping() frequently so shutdown can
// interrupt the (lengthy) process. NOTE(review): original lines 763
// (signature), 770/772 (ledger/info declarations), 776-777 and 782-783
// (the RelationalDBInterfaceSqlite getLedgerInfoByIndex-style calls), 901
// (the getHashesByIndex-style call) and 930 (chooseHistoricalPath call)
// are missing from this view.
762 void
764 {
765  if (isStopping())
766  return;
767 
// Loads the first ("asc") or last ("desc") ledger stored in the SQLite
// database; returns its sequence, or nullopt when none is usable.
768  auto loadLedger =
769  [this](char const* const sortOrder) -> std::optional<std::uint32_t> {
771  std::uint32_t ledgerSeq{0};
773  if (sortOrder == std::string("asc"))
774  {
775  info = dynamic_cast<RelationalDBInterfaceSqlite*>(
778  }
779  else
780  {
781  info = dynamic_cast<RelationalDBInterfaceSqlite*>(
784  }
785  if (info)
786  {
787  ledger = loadLedgerHelper(*info, app_, false);
788  ledgerSeq = info->seq;
789  }
790  if (!ledger || ledgerSeq == 0)
791  {
792  JLOG(j_.error()) << "no suitable ledgers were found in"
793  " the SQLite database to import";
794  return std::nullopt;
795  }
796  return ledgerSeq;
797  };
798 
799  // Find earliest ledger sequence stored
800  auto const earliestLedgerSeq{loadLedger("asc")};
801  if (!earliestLedgerSeq)
802  return;
803 
804  auto const earliestIndex = [&] {
805  auto earliestIndex = seqToShardIndex(*earliestLedgerSeq);
806 
807  // Consider only complete shards
808  if (earliestLedgerSeq != firstLedgerSeq(earliestIndex))
809  ++earliestIndex;
810 
811  return earliestIndex;
812  }();
813 
814  // Find last ledger sequence stored
815  auto const latestLedgerSeq = loadLedger("desc");
816  if (!latestLedgerSeq)
817  return;
818 
819  auto const latestIndex = [&] {
820  auto latestIndex = seqToShardIndex(*latestLedgerSeq);
821 
822  // Consider only complete shards
823  if (latestLedgerSeq != lastLedgerSeq(latestIndex))
824  --latestIndex;
825 
826  return latestIndex;
827  }();
828 
829  if (latestIndex < earliestIndex)
830  {
831  JLOG(j_.error()) << "no suitable ledgers were found in"
832  " the SQLite database to import";
833  return;
834  }
835 
836  JLOG(j_.debug()) << "Importing ledgers for shards " << earliestIndex
837  << " through " << latestIndex;
838 
839  {
840  std::lock_guard lock(mutex_);
841 
// Publish progress state so getDatabaseImportStatus can report it.
842  assert(!databaseImportStatus_);
843  databaseImportStatus_ = std::make_unique<DatabaseImportStatus>(
844  earliestIndex, latestIndex, 0);
845  }
846 
847  // Import the shards
848  for (std::uint32_t shardIndex = earliestIndex; shardIndex <= latestIndex;
849  ++shardIndex)
850  {
851  if (isStopping())
852  return;
853 
854  auto const pathDesignation = [this, shardIndex] {
855  std::lock_guard lock(mutex_);
856 
857  auto const numHistShards = numHistoricalShards(lock);
858  auto const pathDesignation =
859  prepareForNewShard(shardIndex, numHistShards, lock);
860 
861  return pathDesignation;
862  }();
863 
// No path available (e.g. shard limits reached): stop importing.
864  if (!pathDesignation)
865  break;
866 
867  {
868  std::lock_guard lock(mutex_);
869 
870  // Skip if being acquired
871  if (shardIndex == acquireIndex_)
872  {
873  JLOG(j_.debug())
874  << "shard " << shardIndex << " already being acquired";
875  continue;
876  }
877 
878  // Skip if being imported from the shard archive handler
879  if (preparedIndexes_.find(shardIndex) != preparedIndexes_.end())
880  {
881  JLOG(j_.debug())
882  << "shard " << shardIndex << " already being imported";
883  continue;
884  }
885 
886  // Skip if stored
887  if (shards_.find(shardIndex) != shards_.end())
888  {
889  JLOG(j_.debug()) << "shard " << shardIndex << " already stored";
890  continue;
891  }
892  }
893 
894  std::uint32_t const firstSeq = firstLedgerSeq(shardIndex);
895  std::uint32_t const lastSeq =
896  std::max(firstSeq, lastLedgerSeq(shardIndex));
897 
898  // Verify SQLite ledgers are in the node store
899  {
900  auto const ledgerHashes{
902  firstSeq, lastSeq)};
903  if (ledgerHashes.size() != maxLedgers(shardIndex))
904  continue;
905 
906  auto& source = app_.getNodeStore();
907  bool valid{true};
908 
909  for (std::uint32_t n = firstSeq; n <= lastSeq; ++n)
910  {
911  if (!source.fetchNodeObject(ledgerHashes.at(n).ledgerHash, n))
912  {
913  JLOG(j_.warn()) << "SQLite ledger sequence " << n
914  << " mismatches node store";
915  valid = false;
916  break;
917  }
918  }
919  if (!valid)
920  continue;
921  }
922 
923  if (isStopping())
924  return;
925 
926  bool const needsHistoricalPath =
927  *pathDesignation == PathDesignation::historical;
928 
929  auto const path = needsHistoricalPath
931  : dir_;
932 
933  // Create the new shard
934  auto shard{std::make_shared<Shard>(app_, *this, shardIndex, path, j_)};
935  if (!shard->init(scheduler_, *ctx_))
936  continue;
937 
938  {
939  std::lock_guard lock(mutex_);
940 
941  if (isStopping())
942  return;
943 
// Record which shard/sequence range is currently being copied.
944  databaseImportStatus_->currentIndex = shardIndex;
945  databaseImportStatus_->currentShard = shard;
946  databaseImportStatus_->firstSeq = firstSeq;
947  databaseImportStatus_->lastSeq = lastSeq;
948  }
949 
950  // Create a marker file to signify a database import in progress
951  auto const shardDir{path / std::to_string(shardIndex)};
952  auto const markerFile{shardDir / databaseImportMarker_};
953  {
954  std::ofstream ofs{markerFile.string()};
955  if (!ofs.is_open())
956  {
957  JLOG(j_.error()) << "shard " << shardIndex
958  << " failed to create temp marker file";
959  shard->removeOnDestroy();
960  continue;
961  }
962  }
963 
964  // Copy the ledgers from node store
965  std::shared_ptr<Ledger> recentStored;
966  std::optional<uint256> lastLedgerHash;
967 
// shard->prepare() yields the next sequence the shard still needs.
968  while (auto const ledgerSeq = shard->prepare())
969  {
970  if (isStopping())
971  return;
972 
973  auto const ledger{loadByIndex(*ledgerSeq, app_, false)};
974  if (!ledger || ledger->info().seq != ledgerSeq)
975  break;
976 
977  auto const result{shard->storeLedger(ledger, recentStored)};
978  storeStats(result.count, result.size);
979  if (result.error)
980  break;
981 
982  if (!shard->setLedgerStored(ledger))
983  break;
984 
985  if (!lastLedgerHash && ledgerSeq == lastSeq)
986  lastLedgerHash = ledger->info().hash;
987 
988  recentStored = std::move(ledger);
989  }
990 
991  if (isStopping())
992  return;
993 
994  using namespace boost::filesystem;
995  bool success{false};
996  if (lastLedgerHash && shard->getState() == ShardState::complete)
997  {
998  // Store shard final key
999  Serializer s;
1000  s.add32(Shard::version);
1001  s.add32(firstLedgerSeq(shardIndex));
1002  s.add32(lastLedgerSeq(shardIndex));
1003  s.addBitString(*lastLedgerHash);
1004  auto const nodeObject{NodeObject::createObject(
1005  hotUNKNOWN, std::move(s.modData()), Shard::finalKey)};
1006 
1007  if (shard->storeNodeObject(nodeObject))
1008  {
1009  try
1010  {
1011  std::lock_guard lock(mutex_);
1012 
1013  // The database import process is complete and the
1014  // marker file is no longer required
1015  remove_all(markerFile);
1016 
1017  JLOG(j_.debug()) << "shard " << shardIndex
1018  << " was successfully imported"
1019  " from the NodeStore";
1020  finalizeShard(
1021  shards_.emplace(shardIndex, std::move(shard))
1022  .first->second,
1023  true,
1024  std::nullopt);
1025 
1026  // This variable is meant to capture the success
1027  // of everything up to the point of shard finalization.
1028  // If the shard fails to finalize, this condition will
1029  // be handled by the finalization function itself, and
1030  // not here.
1031  success = true;
1032  }
1033  catch (std::exception const& e)
1034  {
1035  JLOG(j_.fatal()) << "shard index " << shardIndex
1036  << ". Exception caught in function "
1037  << __func__ << ". Error: " << e.what();
1038  }
1039  }
1040  }
1041 
1042  if (!success)
1043  {
1044  JLOG(j_.error()) << "shard " << shardIndex
1045  << " failed to import from the NodeStore";
1046  shard->removeOnDestroy();
1047  }
1048  }
1049 
1050  {
1051  std::lock_guard lock(mutex_);
1052  if (isStopping())
1053  return;
1054 
// Import finished (or exhausted the range): clear progress state.
1055  databaseImportStatus_.reset();
1056  }
1057 
1058  updateFileStats();
1059 }
1060 
// Reports the write load of the shard currently being acquired; 0 when no
// acquiring shard exists. NOTE(review): original lines 1061-1062 (return
// type and signature) are missing from this view.
1063 {
1064  std::shared_ptr<Shard> shard;
1065  {
1066  std::lock_guard lock(mutex_);
1067  assert(init_);
1068 
1069  auto const it{shards_.find(acquireIndex_)};
1070  if (it == shards_.end())
1071  return 0;
1072  shard = it->second;
1073  }
1074 
// Query outside the lock to avoid blocking other shard operations.
1075  return shard->getWriteLoad();
1076 }
1077 
// Stores a single node object into the acquiring shard for ledgerSeq's
// shard index; silently logs and returns if that shard is not the one
// being acquired. NOTE(review): original line 1079
// (`DatabaseShardImp::store(`) is missing from this view.
1078 void
1080  NodeObjectType type,
1081  Blob&& data,
1082  uint256 const& hash,
1083  std::uint32_t ledgerSeq)
1084 {
1085  auto const shardIndex{seqToShardIndex(ledgerSeq)};
1086  std::shared_ptr<Shard> shard;
1087  {
1088  std::lock_guard lock(mutex_);
// Writes are only accepted for the shard currently being acquired.
1089  if (shardIndex != acquireIndex_)
1090  {
1091  JLOG(j_.trace())
1092  << "shard " << shardIndex << " is not being acquired";
1093  return;
1094  }
1095 
1096  auto const it{shards_.find(shardIndex)};
1097  if (it == shards_.end())
1098  {
1099  JLOG(j_.error())
1100  << "shard " << shardIndex << " is not being acquired";
1101  return;
1102  }
1103  shard = it->second;
1104  }
1105 
1106  auto const nodeObject{
1107  NodeObject::createObject(type, std::move(data), hash)};
1108  if (shard->storeNodeObject(nodeObject))
1109  storeStats(1, nodeObject->getData().size());
1110 }
1111 
// Copies an entire source ledger into the acquiring shard and, on success,
// marks it stored via setStoredInShard. Returns false when the ledger's
// shard is not being acquired or the copy fails/stores nothing.
// NOTE(review): original line 1113 (the signature with the `srcLedger`
// parameter) is missing from this view.
1112 bool
1114 {
1115  auto const ledgerSeq{srcLedger->info().seq};
1116  auto const shardIndex{seqToShardIndex(ledgerSeq)};
1117  std::shared_ptr<Shard> shard;
1118  {
1119  std::lock_guard lock(mutex_);
1120  assert(init_);
1121 
1122  if (shardIndex != acquireIndex_)
1123  {
1124  JLOG(j_.trace())
1125  << "shard " << shardIndex << " is not being acquired";
1126  return false;
1127  }
1128 
1129  auto const it{shards_.find(shardIndex)};
1130  if (it == shards_.end())
1131  {
1132  JLOG(j_.error())
1133  << "shard " << shardIndex << " is not being acquired";
1134  return false;
1135  }
1136  shard = it->second;
1137  }
1138 
1139  auto const result{shard->storeLedger(srcLedger, nullptr)};
1140  storeStats(result.count, result.size);
// A "successful" copy that stored zero objects/bytes is treated as failure.
1141  if (result.error || result.count == 0 || result.size == 0)
1142  return false;
1143 
1144  return setStoredInShard(shard, srcLedger);
1145 }
1146 
// Periodic maintenance: collects the open finalized shards and, if more
// than openFinalLimit_ are open, closes the least-recently-used ones until
// back within the limit. NOTE(review): original lines 1148 (signature),
// 1150 and 1160 (the `shards` weak-pointer and `openFinals` vector
// declarations) are missing from this view.
1147 void
1149 {
1151  {
1152  std::lock_guard lock(mutex_);
1153  assert(init_);
1154 
// Snapshot the shard set as weak pointers so the sweep runs unlocked.
1155  shards.reserve(shards_.size());
1156  for (auto const& e : shards_)
1157  shards.push_back(e.second);
1158  }
1159 
1161  openFinals.reserve(openFinalLimit_);
1162 
1163  for (auto const& weak : shards)
1164  {
1165  if (auto const shard{weak.lock()}; shard && shard->isOpen())
1166  {
1167  if (shard->getState() == ShardState::finalized)
1168  openFinals.emplace_back(std::move(shard));
1169  }
1170  }
1171 
1172  if (openFinals.size() > openFinalLimit_)
1173  {
1174  JLOG(j_.trace()) << "Open shards exceed configured limit of "
1175  << openFinalLimit_ << " by "
1176  << (openFinals.size() - openFinalLimit_);
1177 
1178  // Try to close enough shards to be within the limit.
1179  // Sort ascending on last use so the oldest are removed first.
1180  std::sort(
1181  openFinals.begin(),
1182  openFinals.end(),
1183  [&](std::shared_ptr<Shard> const& lhsShard,
1184  std::shared_ptr<Shard> const& rhsShard) {
1185  return lhsShard->getLastUse() < rhsShard->getLastUse();
1186  });
1187 
// tryClose may fail if the shard is in use; keep iterating past those.
1188  for (auto it{openFinals.cbegin()};
1189  it != openFinals.cend() && openFinals.size() > openFinalLimit_;)
1190  {
1191  if ((*it)->tryClose())
1192  it = openFinals.erase(it);
1193  else
1194  ++it;
1195  }
1196  }
1197 }
1198 
// Builds a JSON report of the in-progress node store import (index range,
// current shard, stored sequences), or a plain string when no import is
// running. NOTE(review): original lines 1199-1200 (return type and
// signature), 1202 (the `ret` declaration) and 1204 (the
// databaseImportStatus_ guard, and presumably the lock) are missing from
// this view.
1201 {
1203 
1205  {
1206  ret[jss::firstShardIndex] = databaseImportStatus_->earliestIndex;
1207  ret[jss::lastShardIndex] = databaseImportStatus_->latestIndex;
1208  ret[jss::currentShardIndex] = databaseImportStatus_->currentIndex;
1209 
1210  Json::Value currentShard(Json::objectValue);
1211  currentShard[jss::firstSequence] = databaseImportStatus_->firstSeq;
1212  currentShard[jss::lastSequence] = databaseImportStatus_->lastSeq;
1213 
// The current shard may have been released; report stored sequences
// only while it is still alive.
1214  if (auto shard = databaseImportStatus_->currentShard.lock(); shard)
1215  currentShard[jss::storedSeqs] = shard->getStoredSeqs();
1216 
1217  ret[jss::currentShard] = currentShard;
1218  }
1219  else
1220  ret = "Database import not running";
1221 
1222  return ret;
1223 }
1224 
// Returns the first ledger sequence of the shard currently being imported
// from the node store, or an empty optional when no import is running.
// NOTE(review): original lines 1225-1226 (return type and signature) are
// missing from this view.
1227 {
1228  std::lock_guard lock(mutex_);
1229 
1230  if (!databaseImportStatus_)
1231  return {};
1232 
1233  return databaseImportStatus_->firstSeq;
1234 }
1235 
// Parses and validates the [shard_db] configuration section: shard
// geometry values must agree with [node_db], a 'path' must be present,
// historical shard paths must be distinct from it, and the backend must be
// NuDB. Returns false (with logging) on any violation. NOTE(review):
// original lines 1237 (the signature, which takes the lock) and 1298
// (appending each dir to historicalPaths_) are missing from this view.
1236 bool
1238 {
1239  auto fail = [j = j_](std::string const& msg) {
1240  JLOG(j.error()) << "[" << ConfigSection::shardDatabase() << "] " << msg;
1241  return false;
1242  };
1243 
1244  Config const& config{app_.config()};
1245  Section const& section{config.section(ConfigSection::shardDatabase())};
1246 
// True iff `name` has the same value in [shard_db] and [node_db]
// (each falling back to defaultValue when unset).
1247  auto compare = [&](std::string const& name, std::uint32_t defaultValue) {
1248  std::uint32_t shardDBValue{defaultValue};
1249  get_if_exists<std::uint32_t>(section, name, shardDBValue);
1250 
1251  std::uint32_t nodeDBValue{defaultValue};
1252  get_if_exists<std::uint32_t>(
1253  config.section(ConfigSection::nodeDatabase()), name, nodeDBValue);
1254 
1255  return shardDBValue == nodeDBValue;
1256  };
1257 
1258  // If ledgers_per_shard or earliest_seq are specified,
1259  // they must be equally assigned in 'node_db'
1260  if (!compare("ledgers_per_shard", DEFAULT_LEDGERS_PER_SHARD))
1261  {
1262  return fail(
1263  "and [" + ConfigSection::nodeDatabase() + "] define different '" +
1264  "ledgers_per_shard" + "' values");
1265  }
1266  if (!compare("earliest_seq", XRP_LEDGER_EARLIEST_SEQ))
1267  {
1268  return fail(
1269  "and [" + ConfigSection::nodeDatabase() + "] define different '" +
1270  "earliest_seq" + "' values");
1271  }
1272 
1273  using namespace boost::filesystem;
1274  if (!get_if_exists<path>(section, "path", dir_))
1275  return fail("'path' missing");
1276 
1277  {
1278  get_if_exists(section, "max_historical_shards", maxHistoricalShards_);
1279 
1280  Section const& historicalShardPaths =
1281  config.section(SECTION_HISTORICAL_SHARD_PATHS);
1282 
1283  auto values = historicalShardPaths.values();
1284 
// De-duplicate configured historical paths.
1285  std::sort(values.begin(), values.end());
1286  values.erase(std::unique(values.begin(), values.end()), values.end());
1287 
1288  for (auto const& s : values)
1289  {
1290  auto const dir = path(s);
1291  if (dir_ == dir)
1292  {
1293  return fail(
1294  "the 'path' cannot also be in the "
1295  "'historical_shard_path' section");
1296  }
1297 
1299  }
1300  }
1301 
1302  // NuDB is the default and only supported permanent storage backend
1303  backendName_ = get<std::string>(section, "type", "nudb");
1304  if (!boost::iequals(backendName_, "NuDB"))
1305  return fail("'type' value unsupported");
1306 
1307  return true;
1308 }
1309 
1312  uint256 const& hash,
1313  std::uint32_t ledgerSeq,
1314  FetchReport& fetchReport)
1315 {
1316  auto const shardIndex{seqToShardIndex(ledgerSeq)};
1317  std::shared_ptr<Shard> shard;
1318  {
1319  std::lock_guard lock(mutex_);
1320  auto const it{shards_.find(shardIndex)};
1321  if (it == shards_.end())
1322  return nullptr;
1323  shard = it->second;
1324  }
1325 
1326  return shard->fetchNodeObject(hash, fetchReport);
1327 }
1328 
1331  std::uint32_t validLedgerSeq,
1333 {
1334  if (validLedgerSeq < earliestLedgerSeq_)
1335  return std::nullopt;
1336 
1337  auto const maxShardIndex{[this, validLedgerSeq]() {
1338  auto shardIndex{seqToShardIndex(validLedgerSeq)};
1339  if (validLedgerSeq != lastLedgerSeq(shardIndex))
1340  --shardIndex;
1341  return shardIndex;
1342  }()};
1343  auto const maxNumShards{maxShardIndex - earliestShardIndex_ + 1};
1344 
1345  // Check if the shard store has all shards
1346  if (shards_.size() >= maxNumShards)
1347  return std::nullopt;
1348 
1349  if (maxShardIndex < 1024 ||
1350  static_cast<float>(shards_.size()) / maxNumShards > 0.5f)
1351  {
1352  // Small or mostly full index space to sample
1353  // Find the available indexes and select one at random
1355  available.reserve(maxNumShards - shards_.size());
1356 
1357  for (auto shardIndex = earliestShardIndex_; shardIndex <= maxShardIndex;
1358  ++shardIndex)
1359  {
1360  if (shards_.find(shardIndex) == shards_.end() &&
1361  preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
1362  {
1363  available.push_back(shardIndex);
1364  }
1365  }
1366 
1367  if (available.empty())
1368  return std::nullopt;
1369 
1370  if (available.size() == 1)
1371  return available.front();
1372 
1373  return available[rand_int(
1374  0u, static_cast<std::uint32_t>(available.size() - 1))];
1375  }
1376 
1377  // Large, sparse index space to sample
1378  // Keep choosing indexes at random until an available one is found
1379  // chances of running more than 30 times is less than 1 in a billion
1380  for (int i = 0; i < 40; ++i)
1381  {
1382  auto const shardIndex{rand_int(earliestShardIndex_, maxShardIndex)};
1383  if (shards_.find(shardIndex) == shards_.end() &&
1384  preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
1385  {
1386  return shardIndex;
1387  }
1388  }
1389 
1390  assert(false);
1391  return std::nullopt;
1392 }
1393 
1394 void
1396  std::shared_ptr<Shard>& shard,
1397  bool const writeSQLite,
1398  std::optional<uint256> const& expectedHash)
1399 {
1400  taskQueue_.addTask([this,
1401  wptr = std::weak_ptr<Shard>(shard),
1402  writeSQLite,
1403  expectedHash]() {
1404  if (isStopping())
1405  return;
1406 
1407  auto shard{wptr.lock()};
1408  if (!shard)
1409  {
1410  JLOG(j_.debug()) << "Shard removed before being finalized";
1411  return;
1412  }
1413 
1414  if (!shard->finalize(writeSQLite, expectedHash))
1415  {
1416  if (isStopping())
1417  return;
1418 
1419  // Invalid or corrupt shard, remove it
1420  removeFailedShard(shard);
1421  return;
1422  }
1423 
1424  if (isStopping())
1425  return;
1426 
1427  {
1428  auto const boundaryIndex{shardBoundaryIndex()};
1429  std::lock_guard lock(mutex_);
1430 
1431  if (shard->index() < boundaryIndex)
1432  {
1433  // This is a historical shard
1434  if (!historicalPaths_.empty() &&
1435  shard->getDir().parent_path() == dir_)
1436  {
1437  // Shard wasn't placed at a separate historical path
1438  JLOG(j_.warn()) << "shard " << shard->index()
1439  << " is not stored at a historical path";
1440  }
1441  }
1442  else
1443  {
1444  // Not a historical shard. Shift recent shards if necessary
1445  assert(!boundaryIndex || shard->index() - boundaryIndex <= 1);
1446  relocateOutdatedShards(lock);
1447 
1448  // Set the appropriate recent shard index
1449  if (shard->index() == boundaryIndex)
1450  secondLatestShardIndex_ = shard->index();
1451  else
1452  latestShardIndex_ = shard->index();
1453 
1454  if (shard->getDir().parent_path() != dir_)
1455  {
1456  JLOG(j_.warn()) << "shard " << shard->index()
1457  << " is not stored at the path";
1458  }
1459  }
1460 
1461  updatePeers(lock);
1462  }
1463 
1464  updateFileStats();
1465  });
1466 }
1467 
1468 void
1470 {
1472  {
1473  std::lock_guard lock(mutex_);
1474  if (shards_.empty())
1475  return;
1476 
1477  shards.reserve(shards_.size());
1478  for (auto const& e : shards_)
1479  shards.push_back(e.second);
1480  }
1481 
1482  std::uint64_t sumSz{0};
1483  std::uint32_t sumFd{0};
1484  std::uint32_t numShards{0};
1485  for (auto const& weak : shards)
1486  {
1487  if (auto const shard{weak.lock()}; shard)
1488  {
1489  auto const [sz, fd] = shard->getFileInfo();
1490  sumSz += sz;
1491  sumFd += fd;
1492  ++numShards;
1493  }
1494  }
1495 
1496  std::lock_guard lock(mutex_);
1497  fileSz_ = sumSz;
1498  fdRequired_ = sumFd;
1499  avgShardFileSz_ = (numShards == 0 ? fileSz_ : fileSz_ / numShards);
1500 
1501  if (!canAdd_)
1502  return;
1503 
1504  if (auto const count = numHistoricalShards(lock);
1505  count >= maxHistoricalShards_)
1506  {
1508  {
1509  // In order to avoid excessive output, don't produce
1510  // this warning if the server isn't configured to
1511  // store historical shards.
1512  JLOG(j_.warn()) << "maximum number of historical shards reached";
1513  }
1514 
1515  canAdd_ = false;
1516  }
1517  else if (!sufficientStorage(
1518  maxHistoricalShards_ - count,
1520  lock))
1521  {
1522  JLOG(j_.warn())
1523  << "maximum shard store size exceeds available storage space";
1524 
1525  canAdd_ = false;
1526  }
1527 }
1528 
1529 bool
1531  std::uint32_t numShards,
1532  PathDesignation pathDesignation,
1533  std::lock_guard<std::mutex> const&) const
1534 {
1535  try
1536  {
1537  std::vector<std::uint64_t> capacities;
1538 
1539  if (pathDesignation == PathDesignation::historical &&
1541  {
1542  capacities.reserve(historicalPaths_.size());
1543 
1544  for (auto const& path : historicalPaths_)
1545  {
1546  // Get the available storage for each historical path
1547  auto const availableSpace =
1548  boost::filesystem::space(path).available;
1549 
1550  capacities.push_back(availableSpace);
1551  }
1552  }
1553  else
1554  {
1555  // Get the available storage for the main shard path
1556  capacities.push_back(boost::filesystem::space(dir_).available);
1557  }
1558 
1559  for (std::uint64_t const capacity : capacities)
1560  {
1561  // Leverage all the historical shard paths to
1562  // see if collectively they can fit the specified
1563  // number of shards. For this to work properly,
1564  // each historical path must correspond to a separate
1565  // physical device or filesystem.
1566 
1567  auto const shardCap = capacity / avgShardFileSz_;
1568  if (numShards <= shardCap)
1569  return true;
1570 
1571  numShards -= shardCap;
1572  }
1573  }
1574  catch (std::exception const& e)
1575  {
1576  JLOG(j_.fatal()) << "Exception caught in function " << __func__
1577  << ". Error: " << e.what();
1578  return false;
1579  }
1580 
1581  return false;
1582 }
1583 
1584 bool
1586  std::shared_ptr<Shard>& shard,
1587  std::shared_ptr<Ledger const> const& ledger)
1588 {
1589  if (!shard->setLedgerStored(ledger))
1590  {
1591  // Invalid or corrupt shard, remove it
1592  removeFailedShard(shard);
1593  return false;
1594  }
1595 
1596  if (shard->getState() == ShardState::complete)
1597  {
1598  std::lock_guard lock(mutex_);
1599  if (auto const it{shards_.find(shard->index())}; it != shards_.end())
1600  {
1601  if (shard->index() == acquireIndex_)
1602  acquireIndex_ = 0;
1603 
1604  finalizeShard(it->second, false, std::nullopt);
1605  }
1606  else
1607  {
1608  JLOG(j_.debug())
1609  << "shard " << shard->index() << " is no longer being acquired";
1610  }
1611  }
1612 
1613  updateFileStats();
1614  return true;
1615 }
1616 
1617 void
1619 {
1620  {
1621  std::lock_guard lock(mutex_);
1622 
1623  if (shard->index() == acquireIndex_)
1624  acquireIndex_ = 0;
1625 
1626  if (shard->index() == latestShardIndex_)
1627  latestShardIndex_ = std::nullopt;
1628 
1629  if (shard->index() == secondLatestShardIndex_)
1630  secondLatestShardIndex_ = std::nullopt;
1631  }
1632 
1633  shard->removeOnDestroy();
1634 
1635  // Reset the shared_ptr to invoke the shard's
1636  // destructor and remove it from the server
1637  shard.reset();
1638  updateFileStats();
1639 }
1640 
1643 {
1644  auto const validIndex = app_.getLedgerMaster().getValidLedgerIndex();
1645 
1646  if (validIndex < earliestLedgerSeq_)
1647  return 0;
1648 
1649  // Shards with an index earlier than the recent shard boundary index
1650  // are considered historical. The three shards at or later than
1651  // this index consist of the two most recently validated shards
1652  // and the shard still in the process of being built by live
1653  // transactions.
1654  return seqToShardIndex(validIndex) - 1;
1655 }
1656 
1659  std::lock_guard<std::mutex> const& lock) const
1660 {
1661  auto const boundaryIndex{shardBoundaryIndex()};
1662  return std::count_if(
1663  shards_.begin(), shards_.end(), [boundaryIndex](auto const& entry) {
1664  return entry.first < boundaryIndex;
1665  });
1666 }
1667 
1668 void
1670  std::lock_guard<std::mutex> const& lock)
1671 {
1672  auto& cur{latestShardIndex_};
1673  auto& prev{secondLatestShardIndex_};
1674  if (!cur && !prev)
1675  return;
1676 
1677  auto const latestShardIndex =
1679  auto const separateHistoricalPath = !historicalPaths_.empty();
1680 
1681  auto const removeShard = [this](std::uint32_t const shardIndex) -> void {
1682  canAdd_ = false;
1683 
1684  if (auto it = shards_.find(shardIndex); it != shards_.end())
1685  {
1686  if (it->second)
1687  removeFailedShard(it->second);
1688  else
1689  {
1690  JLOG(j_.warn()) << "can't find shard to remove";
1691  }
1692  }
1693  else
1694  {
1695  JLOG(j_.warn()) << "can't find shard to remove";
1696  }
1697  };
1698 
1699  auto const keepShard = [this, &lock, removeShard, separateHistoricalPath](
1700  std::uint32_t const shardIndex) -> bool {
1702  {
1703  JLOG(j_.error()) << "maximum number of historical shards reached";
1704  removeShard(shardIndex);
1705  return false;
1706  }
1707  if (separateHistoricalPath &&
1709  {
1710  JLOG(j_.error()) << "insufficient storage space available";
1711  removeShard(shardIndex);
1712  return false;
1713  }
1714 
1715  return true;
1716  };
1717 
1718  // Move a shard from the main shard path to a historical shard
1719  // path by copying the contents, and creating a new shard.
1720  auto const moveShard = [this,
1721  &lock](std::uint32_t const shardIndex) -> void {
1722  auto it{shards_.find(shardIndex)};
1723  if (it == shards_.end())
1724  {
1725  JLOG(j_.warn()) << "can't find shard to move to historical path";
1726  return;
1727  }
1728 
1729  auto& shard{it->second};
1730 
1731  // Close any open file descriptors before moving the shard
1732  // directory. Don't call removeOnDestroy since that would
1733  // attempt to close the fds after the directory has been moved.
1734  if (!shard->tryClose())
1735  {
1736  JLOG(j_.warn()) << "can't close shard to move to historical path";
1737  return;
1738  }
1739 
1740  auto const dst{chooseHistoricalPath(lock)};
1741  try
1742  {
1743  // Move the shard directory to the new path
1744  boost::filesystem::rename(
1745  shard->getDir().string(), dst / std::to_string(shardIndex));
1746  }
1747  catch (...)
1748  {
1749  JLOG(j_.error()) << "shard " << shardIndex
1750  << " failed to move to historical storage";
1751  return;
1752  }
1753 
1754  // Create a shard instance at the new location
1755  shard = std::make_shared<Shard>(app_, *this, shardIndex, dst, j_);
1756 
1757  // Open the new shard
1758  if (!shard->init(scheduler_, *ctx_))
1759  {
1760  JLOG(j_.error()) << "shard " << shardIndex
1761  << " failed to open in historical storage";
1762  shard->removeOnDestroy();
1763  shard.reset();
1764  }
1765  };
1766 
1767  // See if either of the recent shards needs to be updated
1768  bool const curNotSynched =
1769  latestShardIndex_ && *latestShardIndex_ != latestShardIndex;
1770  bool const prevNotSynched = secondLatestShardIndex_ &&
1771  *secondLatestShardIndex_ != latestShardIndex - 1;
1772 
1773  // A new shard has been published. Move outdated
1774  // shards to historical storage as needed
1775  if (curNotSynched || prevNotSynched)
1776  {
1777  if (prev)
1778  {
1779  // Move the formerly second latest shard to historical storage
1780  if (keepShard(*prev) && separateHistoricalPath)
1781  moveShard(*prev);
1782 
1783  prev = std::nullopt;
1784  }
1785 
1786  if (cur)
1787  {
1788  // The formerly latest shard is now the second latest
1789  if (cur == latestShardIndex - 1)
1790  prev = cur;
1791 
1792  // The formerly latest shard is no longer a 'recent' shard
1793  else
1794  {
1795  // Move the formerly latest shard to historical storage
1796  if (keepShard(*cur) && separateHistoricalPath)
1797  moveShard(*cur);
1798  }
1799 
1800  cur = std::nullopt;
1801  }
1802  }
1803 }
1804 
1805 auto
1807  std::uint32_t shardIndex,
1810 {
1811  // Any shard earlier than the two most recent shards is a historical shard
1812  auto const boundaryIndex{shardBoundaryIndex()};
1813  auto const isHistoricalShard = shardIndex < boundaryIndex;
1814 
1815  auto const designation = isHistoricalShard && !historicalPaths_.empty()
1818 
1819  // Check shard count and available storage space
1820  if (isHistoricalShard && numHistoricalShards >= maxHistoricalShards_)
1821  {
1822  JLOG(j_.error()) << "maximum number of historical shards reached";
1823  canAdd_ = false;
1824  return std::nullopt;
1825  }
1826  if (!sufficientStorage(1, designation, lock))
1827  {
1828  JLOG(j_.error()) << "insufficient storage space available";
1829  canAdd_ = false;
1830  return std::nullopt;
1831  }
1832 
1833  return designation;
1834 }
1835 
1836 boost::filesystem::path
1838 {
1839  // If not configured with separate historical paths,
1840  // use the main path (dir_) by default.
1841  if (historicalPaths_.empty())
1842  return dir_;
1843 
1844  boost::filesystem::path historicalShardPath;
1845  std::vector<boost::filesystem::path> potentialPaths;
1846 
1847  for (boost::filesystem::path const& path : historicalPaths_)
1848  {
1849  if (boost::filesystem::space(path).available >= avgShardFileSz_)
1850  potentialPaths.push_back(path);
1851  }
1852 
1853  if (potentialPaths.empty())
1854  {
1855  JLOG(j_.error()) << "failed to select a historical shard path";
1856  return "";
1857  }
1858 
1859  std::sample(
1860  potentialPaths.begin(),
1861  potentialPaths.end(),
1862  &historicalShardPath,
1863  1,
1864  default_prng());
1865 
1866  return historicalShardPath;
1867 }
1868 
1869 bool
1871 {
1872 #if BOOST_OS_LINUX
1873  // Each historical shard path must correspond
1874  // to a directory on a distinct device or file system.
1875  // Currently, this constraint is enforced only on Linux.
1878 
1879  for (auto const& path : historicalPaths_)
1880  {
1881  struct statvfs buffer;
1882  if (statvfs(path.c_str(), &buffer))
1883  {
1884  JLOG(j_.error())
1885  << "failed to acquire stats for 'historical_shard_path': "
1886  << path;
1887  return false;
1888  }
1889 
1890  filesystemIDs[buffer.f_fsid].push_back(path.string());
1891  }
1892 
1893  bool ret = true;
1894  for (auto const& entry : filesystemIDs)
1895  {
1896  // Check to see if any of the paths are stored on the same file system
1897  if (entry.second.size() > 1)
1898  {
1899  // Two or more historical storage paths
1900  // correspond to the same file system.
1901  JLOG(j_.error())
1902  << "The following paths correspond to the same filesystem: "
1903  << boost::algorithm::join(entry.second, ", ")
1904  << ". Each configured historical storage path should"
1905  " be on a unique device or filesystem.";
1906 
1907  ret = false;
1908  }
1909  }
1910 
1911  return ret;
1912 
1913 #else
1914  // The requirement that each historical storage path
1915  // corresponds to a distinct device or file system is
1916  // enforced only on Linux, so on other platforms
1917  // keep track of the available capacities for each
1918  // path. Issue a warning if we suspect any of the paths
1919  // may violate this requirement.
1920 
1921  // Map byte counts to each path that shares that byte count.
1923  uniqueCapacities(historicalPaths_.size());
1924 
1925  for (auto const& path : historicalPaths_)
1926  uniqueCapacities[boost::filesystem::space(path).available].push_back(
1927  path.string());
1928 
1929  for (auto const& entry : uniqueCapacities)
1930  {
1931  // Check to see if any paths have the same amount of available bytes.
1932  if (entry.second.size() > 1)
1933  {
1934  // Two or more historical storage paths may
1935  // correspond to the same device or file system.
1936  JLOG(j_.warn())
1937  << "Each of the following paths have " << entry.first
1938  << " bytes free, and may be located on the same device"
1939  " or file system: "
1940  << boost::algorithm::join(entry.second, ", ")
1941  << ". Each configured historical storage path should"
1942  " be on a unique device or file system.";
1943  }
1944  }
1945 #endif
1946 
1947  return true;
1948 }
1949 
1950 bool
1952  LedgerIndex ledgerSeq,
1953  std::function<bool(soci::session& session)> const& callback)
1954 {
1955  return callForLedgerSQLByShardIndex(seqToShardIndex(ledgerSeq), callback);
1956 }
1957 
1958 bool
1960  const uint32_t shardIndex,
1961  std::function<bool(soci::session& session)> const& callback)
1962 {
1963  std::lock_guard lock(mutex_);
1964 
1965  auto const it{shards_.find(shardIndex)};
1966 
1967  return it != shards_.end() &&
1968  it->second->getState() == ShardState::finalized &&
1969  it->second->callForLedgerSQL(callback);
1970 }
1971 
1972 bool
1974  LedgerIndex ledgerSeq,
1975  std::function<bool(soci::session& session)> const& callback)
1976 {
1978  seqToShardIndex(ledgerSeq), callback);
1979 }
1980 
1981 bool
1983  std::uint32_t const shardIndex,
1984  std::function<bool(soci::session& session)> const& callback)
1985 {
1986  std::lock_guard lock(mutex_);
1987 
1988  auto const it{shards_.find(shardIndex)};
1989 
1990  return it != shards_.end() &&
1991  it->second->getState() == ShardState::finalized &&
1992  it->second->callForTransactionSQL(callback);
1993 }
1994 
1995 bool
1997  std::optional<std::uint32_t> minShardIndex,
1998  std::function<bool(Shard& shard)> const& visit)
1999 {
2000  std::lock_guard lock(mutex_);
2001 
2003 
2004  if (!minShardIndex)
2005  it = shards_.begin();
2006  else
2007  it = shards_.lower_bound(*minShardIndex);
2008 
2009  eit = shards_.end();
2010 
2011  for (; it != eit; it++)
2012  {
2013  if (it->second->getState() == ShardState::finalized)
2014  {
2015  if (!visit(*it->second))
2016  return false;
2017  }
2018  }
2019 
2020  return true;
2021 }
2022 
2023 bool
2025  std::optional<std::uint32_t> minShardIndex,
2026  std::function<bool(soci::session& session, std::uint32_t shardIndex)> const&
2027  callback)
2028 {
2029  return iterateShardsForward(
2030  minShardIndex, [&callback](Shard& shard) -> bool {
2031  return shard.callForLedgerSQL(callback);
2032  });
2033 }
2034 
2035 bool
2037  std::optional<std::uint32_t> minShardIndex,
2038  std::function<bool(soci::session& session, std::uint32_t shardIndex)> const&
2039  callback)
2040 {
2041  return iterateShardsForward(
2042  minShardIndex, [&callback](Shard& shard) -> bool {
2043  return shard.callForTransactionSQL(callback);
2044  });
2045 }
2046 
2047 bool
2049  std::optional<std::uint32_t> maxShardIndex,
2050  std::function<bool(Shard& shard)> const& visit)
2051 {
2052  std::lock_guard lock(mutex_);
2053 
2054  std::map<std::uint32_t, std::shared_ptr<Shard>>::reverse_iterator it, eit;
2055 
2056  if (!maxShardIndex)
2057  it = shards_.rbegin();
2058  else
2059  it = std::make_reverse_iterator(shards_.upper_bound(*maxShardIndex));
2060 
2061  eit = shards_.rend();
2062 
2063  for (; it != eit; it++)
2064  {
2065  if (it->second->getState() == ShardState::finalized &&
2066  (!maxShardIndex || it->first <= *maxShardIndex))
2067  {
2068  if (!visit(*it->second))
2069  return false;
2070  }
2071  }
2072 
2073  return true;
2074 }
2075 
2076 bool
2078  std::optional<std::uint32_t> maxShardIndex,
2079  std::function<bool(soci::session& session, std::uint32_t shardIndex)> const&
2080  callback)
2081 {
2082  return iterateShardsBack(maxShardIndex, [&callback](Shard& shard) -> bool {
2083  return shard.callForLedgerSQL(callback);
2084  });
2085 }
2086 
2087 bool
2089  std::optional<std::uint32_t> maxShardIndex,
2090  std::function<bool(soci::session& session, std::uint32_t shardIndex)> const&
2091  callback)
2092 {
2093  return iterateShardsBack(maxShardIndex, [&callback](Shard& shard) -> bool {
2094  return shard.callForTransactionSQL(callback);
2095  });
2096 }
2097 
2100 {
2101  auto shardInfo{std::make_unique<ShardInfo>()};
2102  for (auto const& [_, shard] : shards_)
2103  {
2104  shardInfo->update(
2105  shard->index(), shard->getState(), shard->getPercentProgress());
2106  }
2107 
2108  for (auto const shardIndex : preparedIndexes_)
2109  shardInfo->update(shardIndex, ShardState::queued, 0);
2110 
2111  return shardInfo;
2112 }
2113 
2114 size_t
2116 {
2117  std::lock_guard lock(mutex_);
2118  return taskQueue_.size();
2119 }
2120 
2121 void
2123 {
2124  if (!app_.config().standalone() &&
2126  {
2127  auto const message{getShardInfo(lock)->makeMessage(app_)};
2128  app_.overlay().foreach(send_always(std::make_shared<Message>(
2129  message, protocol::mtPEER_SHARD_INFO_V2)));
2130  }
2131 }
2132 
2133 //------------------------------------------------------------------------------
2134 
2137  Application& app,
2138  Scheduler& scheduler,
2139  int readThreads,
2140  beast::Journal j)
2141 {
2142  // The shard store is optional. Future changes will require it.
2143  Section const& section{
2145  if (section.empty())
2146  return nullptr;
2147 
2148  return std::make_unique<DatabaseShardImp>(app, scheduler, readThreads, j);
2149 }
2150 
2151 } // namespace NodeStore
2152 } // namespace ripple
beast::Journal::fatal
Stream fatal() const
Definition: Journal.h:339
ripple::NodeStore::DatabaseShardImp::iterateLedgerSQLsForward
bool iterateLedgerSQLsForward(std::optional< std::uint32_t > minShardIndex, std::function< bool(soci::session &session, std::uint32_t shardIndex)> const &callback) override
iterateLedgerSQLsForward Checks out ledger databases for all shards in ascending order starting from ...
Definition: DatabaseShardImp.cpp:2024
ripple::SizedItem::openFinalLimit
@ openFinalLimit
ripple::Section
Holds a collection of configuration values.
Definition: BasicConfig.h:43
ripple::NodeStore::Database::lastLedgerSeq
std::uint32_t lastLedgerSeq(std::uint32_t shardIndex) const noexcept
Calculates the last ledger sequence for a given shard index.
Definition: Database.h:271
ripple::Application
Definition: Application.h:103
ripple::hotUNKNOWN
@ hotUNKNOWN
Definition: NodeObject.h:33
ripple::NodeStore::make_ShardStore
std::unique_ptr< DatabaseShard > make_ShardStore(Application &app, Scheduler &scheduler, int readThreads, beast::Journal j)
Definition: DatabaseShardImp.cpp:2136
ripple::makeSlice
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:240
ripple::NodeStore::DatabaseShardImp::mutex_
std::mutex mutex_
Definition: DatabaseShardImp.h:227
ripple::NodeStore::DatabaseShardImp::app_
Application & app_
Definition: DatabaseShardImp.h:226
ripple::ShardState::complete
@ complete
ripple::DEFAULT_LEDGERS_PER_SHARD
static constexpr std::uint32_t DEFAULT_LEDGERS_PER_SHARD
The number of ledgers in a shard.
Definition: SystemParameters.h:64
ripple::NodeStore::DatabaseShardImp::storeLedger
bool storeLedger(std::shared_ptr< Ledger const > const &srcLedger) override
Store a ledger from a different database.
Definition: DatabaseShardImp.cpp:1113
ripple::NodeStore::Database
Persistency layer for NodeObject.
Definition: Database.h:52
std::string
STL class.
std::shared_ptr< Ledger >
ripple::loadByIndex
std::shared_ptr< Ledger > loadByIndex(std::uint32_t ledgerIndex, Application &app, bool acquire)
Definition: Ledger.cpp:1061
ripple::SizedItem
SizedItem
Definition: Config.h:48
ripple::NodeStore::DatabaseShardImp::shards_
std::map< std::uint32_t, std::shared_ptr< Shard > > shards_
Definition: DatabaseShardImp.h:237
std::exception
STL class.
std::stoul
T stoul(T... args)
ripple::NodeStore::DatabaseShardImp::PathDesignation
PathDesignation
Definition: DatabaseShardImp.h:190
beast::Journal::trace
Stream trace() const
Severity stream access functions.
Definition: Journal.h:309
ripple::Serializer::modData
Blob & modData()
Definition: Serializer.h:178
ripple::NodeStore::DatabaseShardImp::callForLedgerSQLByLedgerSeq
bool callForLedgerSQLByLedgerSeq(LedgerIndex ledgerSeq, std::function< bool(soci::session &session)> const &callback) override
Invoke a callback on the SQLite db holding the corresponding ledger.
Definition: DatabaseShardImp.cpp:1951
ripple::NodeStore::TaskQueue::size
size_t size() const
Return the queue size.
Definition: TaskQueue.cpp:48
std::vector::reserve
T reserve(T... args)
ripple::NodeStore::DatabaseShardImp::removePreShard
void removePreShard(std::uint32_t shardIndex) override
Remove a previously prepared shard index for import.
Definition: DatabaseShardImp.cpp:416
ripple::LedgerMaster::getValidLedgerIndex
LedgerIndex getValidLedgerIndex()
Definition: LedgerMaster.cpp:214
ripple::NodeStore::Database::fdRequired_
int fdRequired_
Definition: Database.h:303
ripple::NodeStore::DatabaseShardImp::fileSz_
std::uint64_t fileSz_
Definition: DatabaseShardImp.h:261
ripple::InboundLedger::Reason::GENERIC
@ GENERIC
std::vector
STL class.
std::set::find
T find(T... args)
ripple::ConfigSection::shardDatabase
static std::string shardDatabase()
Definition: ConfigSections.h:38
ripple::NodeStore::Shard::callForLedgerSQL
bool callForLedgerSQL(std::function< bool(Args... args)> const &callback)
Invoke a callback on the ledger SQLite db.
Definition: Shard.h:225
ripple::NodeStore::DatabaseShardImp::stop
void stop() override
Definition: DatabaseShardImp.cpp:699
std::vector::size
T size(T... args)
ripple::Application::getRelationalDBInterface
virtual RelationalDBInterface & getRelationalDBInterface()=0
ripple::NodeObjectType
NodeObjectType
The types of node objects.
Definition: NodeObject.h:32
ripple::NodeObject::createObject
static std::shared_ptr< NodeObject > createObject(NodeObjectType type, Blob &&data, uint256 const &hash)
Create an object from fields.
Definition: NodeObject.cpp:37
ripple::NodeStore::DatabaseShardImp::taskQueue_
TaskQueue taskQueue_
Definition: DatabaseShardImp.h:234
ripple::NodeStore::DatabaseShardImp::setStored
void setStored(std::shared_ptr< Ledger const > const &ledger) override
Notifies the database that the given ledger has been fully acquired and stored.
Definition: DatabaseShardImp.cpp:630
std::set::emplace
T emplace(T... args)
beast::Journal::warn
Stream warn() const
Definition: Journal.h:327
std::lock_guard
STL class.
ripple::kilobytes
constexpr auto kilobytes(T value) noexcept
Definition: ByteUtilities.h:27
ripple::NetworkOPs::getOperatingMode
virtual OperatingMode getOperatingMode() const =0
ripple::NodeStore::Database::stop
virtual void stop()
Definition: Database.cpp:89
ripple::NodeStore::FetchReport
Contains information about a fetch operation.
Definition: ripple/nodestore/Scheduler.h:32
ripple::NodeStore::DatabaseShardImp::getDatabaseImportSequence
std::optional< std::uint32_t > getDatabaseImportSequence() const override
Returns the first ledger sequence of the shard currently being imported from the NodeStore.
Definition: DatabaseShardImp.cpp:1226
std::function
std::all_of
T all_of(T... args)
ripple::NodeStore::Shard::finalKey
static const uint256 finalKey
Definition: Shard.h:248
ripple::LedgerMaster::walkHashBySeq
std::optional< LedgerHash > walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason)
Walk to a ledger's hash using the skip list.
Definition: LedgerMaster.cpp:1703
ripple::getLimitedNewestLedgerInfo
std::optional< LedgerInfo > getLimitedNewestLedgerInfo(soci::session &session, LedgerIndex ledgerFirstIndex, beast::Journal j)
getLimitedNewestLedgerInfo Returns info of newest ledger from ledgers with sequences greather or equa...
Definition: RelationalDBInterface_nodes.cpp:486
ripple::NodeStore::DatabaseShardImp::importDatabase
void importDatabase(Database &source) override
Import the application local node store.
Definition: DatabaseShardImp.cpp:742
ripple::deserializePrefixedHeader
LedgerInfo deserializePrefixedHeader(Slice data, bool hasHash)
Deserialize a ledger header (prefixed with 4 bytes) from a byte array.
Definition: InboundLedger.cpp:293
ripple::NodeStore::DatabaseShardImp::databaseImporter_
std::thread databaseImporter_
Definition: DatabaseShardImp.h:286
ripple::NodeStore::DatabaseShardImp::openFinalLimit_
const std::uint32_t openFinalLimit_
Definition: DatabaseShardImp.h:267
std::sort
T sort(T... args)
std::shared_ptr::reset
T reset(T... args)
ripple::SHAMapHash
Definition: SHAMapTreeNode.h:48
ripple::NodeStore::DatabaseShardImp::iterateShardsForward
bool iterateShardsForward(std::optional< std::uint32_t > minShardIndex, std::function< bool(Shard &shard)> const &visit)
iterateShardsForward Visits all shards starting from given in ascending order and calls given callbac...
Definition: DatabaseShardImp.cpp:1996
ripple::Application::getOPs
virtual NetworkOPs & getOPs()=0
ripple::NodeStore::DatabaseShardImp::sweep
void sweep() override
Remove expired entries from the positive and negative caches.
Definition: DatabaseShardImp.cpp:1148
ripple::getLimitedOldestLedgerInfo
std::optional< LedgerInfo > getLimitedOldestLedgerInfo(soci::session &session, LedgerIndex ledgerFirstIndex, beast::Journal j)
getLimitedOldestLedgerInfo Returns info of oldest ledger from ledgers with sequences greather or equa...
Definition: RelationalDBInterface_nodes.cpp:474
ripple::Section::values
std::vector< std::string > const & values() const
Returns all the values in the section.
Definition: BasicConfig.h:76
ripple::send_always
Sends a message to all peers.
Definition: predicates.h:31
ripple::NodeStore::DatabaseShardImp::PathDesignation::historical
@ historical
ripple::get_if_exists
bool get_if_exists(Section const &section, std::string const &name, T &v)
Definition: BasicConfig.h:347
ripple::NodeStore::Shard::version
static constexpr std::uint32_t version
Definition: Shard.h:243
ripple::NodeStore::DatabaseShardImp::getDatabaseImportStatus
Json::Value getDatabaseImportStatus() const override
Definition: DatabaseShardImp.cpp:1200
std::vector::push_back
T push_back(T... args)
ripple::NodeStore::DatabaseShardImp::fetchNodeObject
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq, FetchReport &fetchReport) override
Definition: DatabaseShardImp.cpp:1311
ripple::NodeStore::DatabaseShardImp::secondLatestShardIndex_
std::optional< std::uint32_t > secondLatestShardIndex_
Definition: DatabaseShardImp.h:280
ripple::NodeStore::DatabaseShardImp::avgShardFileSz_
std::uint64_t avgShardFileSz_
Definition: DatabaseShardImp.h:264
ripple::NodeStore::DatabaseShardImp::callForTransactionSQLByLedgerSeq
bool callForTransactionSQLByLedgerSeq(LedgerIndex ledgerSeq, std::function< bool(soci::session &session)> const &callback) override
Invoke a callback on the transaction SQLite db for the corresponding ledger.
Definition: DatabaseShardImp.cpp:1973
ripple::base_uint< 256 >
ripple::NodeStore::DatabaseShardImp::updatePeers
void updatePeers(std::lock_guard< std::mutex > const &lock) const
Definition: DatabaseShardImp.cpp:2122
std::sample
T sample(T... args)
ripple::NodeStore::DatabaseShardImp::getPreShards
std::string getPreShards() override
Get shard indexes being imported.
Definition: DatabaseShardImp.cpp:426
ripple::NodeStore::DatabaseShardImp::databaseImportStatus_
std::unique_ptr< DatabaseImportStatus > databaseImportStatus_
Definition: DatabaseShardImp.h:283
ripple::NodeStore::DatabaseShardImp::getWriteLoad
std::int32_t getWriteLoad() const override
Retrieve the estimated number of pending write operations.
Definition: DatabaseShardImp.cpp:1062
std::thread::joinable
T joinable(T... args)
ripple::NodeStore::DatabaseShardImp::findAcquireIndex
std::optional< std::uint32_t > findAcquireIndex(std::uint32_t validLedgerSeq, std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:1330
ripple::NodeStore::Database::firstLedgerSeq
std::uint32_t firstLedgerSeq(std::uint32_t shardIndex) const noexcept
Calculates the first ledger sequence for a given shard index.
Definition: Database.h:257
ripple::Config::reporting
bool reporting() const
Definition: Config.h:276
ripple::OperatingMode::DISCONNECTED
@ DISCONNECTED
not ready to process requests
ripple::NodeStore::DatabaseShardImp::iterateTransactionSQLsBack
bool iterateTransactionSQLsBack(std::optional< std::uint32_t > maxShardIndex, std::function< bool(soci::session &session, std::uint32_t shardIndex)> const &callback) override
iterateTransactionSQLsBack Checks out transaction databases for all shards in descending order starti...
Definition: DatabaseShardImp.cpp:2088
ripple::NodeStore::DatabaseShardImp::chooseHistoricalPath
boost::filesystem::path chooseHistoricalPath(std::lock_guard< std::mutex > const &) const
Definition: DatabaseShardImp.cpp:1837
ripple::rand_int
std::enable_if_t< std::is_integral< Integral >::value &&detail::is_engine< Engine >::value, Integral > rand_int(Engine &engine, Integral min, Integral max)
Return a uniformly distributed random integer.
Definition: ripple/basics/random.h:115
ripple::NodeStore::DatabaseShardImp::sufficientStorage
bool sufficientStorage(std::uint32_t numShards, PathDesignation pathDesignation, std::lock_guard< std::mutex > const &) const
Definition: DatabaseShardImp.cpp:1530
ripple::NodeStore::DatabaseShardImp::init_
bool init_
Definition: DatabaseShardImp.h:228
std::thread
STL class.
Json::objectValue
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:43
ripple::Application::getLedgerMaster
virtual LedgerMaster & getLedgerMaster()=0
ripple::NodeStore::TaskQueue::addTask
void addTask(std::function< void()> task)
Adds a task to the queue.
Definition: TaskQueue.cpp:38
ripple::NodeStore::DatabaseShardImp::callForLedgerSQLByShardIndex
bool callForLedgerSQLByShardIndex(std::uint32_t const shardIndex, std::function< bool(soci::session &session)> const &callback) override
Invoke a callback on the ledger SQLite db for the corresponding shard.
Definition: DatabaseShardImp.cpp:1959
ripple::Config
Definition: Config.h:68
ripple::RelationalDBInterfaceSqlite
Definition: RelationalDBInterfaceSqlite.h:27
ripple::NodeStore::DatabaseShardImp::doImportDatabase
void doImportDatabase()
Definition: DatabaseShardImp.cpp:763
ripple::compare
int compare(base_uint< Bits, Tag > const &a, base_uint< Bits, Tag > const &b)
Definition: base_uint.h:530
std::ofstream
STL class.
ripple::Application::config
virtual Config & config()=0
ripple::NodeStore::DatabaseShardImp::dir_
boost::filesystem::path dir_
Definition: DatabaseShardImp.h:246
ripple::Config::standalone
bool standalone() const
Definition: Config.h:271
ripple::NodeStore::DatabaseShardImp::removeFailedShard
void removeFailedShard(std::shared_ptr< Shard > &shard)
Definition: DatabaseShardImp.cpp:1618
ripple::NodeStore::DatabaseShard
A collection of historical shards.
Definition: DatabaseShard.h:37
std::to_string
T to_string(T... args)
ripple::NodeStore::DatabaseShardImp::getNumTasks
size_t getNumTasks() const override
Returns the number of queued tasks.
Definition: DatabaseShardImp.cpp:2115
ripple::NodeStore::DatabaseShardImp::importShard
bool importShard(std::uint32_t shardIndex, boost::filesystem::path const &srcDir) override
Import a shard from the shard archive handler into the shard database.
Definition: DatabaseShardImp.cpp:444
ripple::default_prng
beast::xor_shift_engine & default_prng()
Return the default random engine.
Definition: ripple/basics/random.h:65
ripple::NodeStore::DatabaseShardImp::store
void store(NodeObjectType type, Blob &&data, uint256 const &hash, std::uint32_t ledgerSeq) override
Definition: DatabaseShardImp.cpp:1079
ripple::NodeStore::TaskQueue::stop
void stop()
Definition: TaskQueue.cpp:32
ripple::NodeStore::DatabaseShardImp::PathDesignation::none
@ none
beast::Journal::error
Stream error() const
Definition: Journal.h:333
ripple::ShardState::finalized
@ finalized
std::set::erase
T erase(T... args)
ripple::NodeStore::DatabaseShardImp::initConfig
bool initConfig(std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:1237
ripple::ConfigSection
Definition: ConfigSections.h:28
ripple::NodeStore::DatabaseShardImp::latestShardIndex_
std::optional< std::uint32_t > latestShardIndex_
Definition: DatabaseShardImp.h:279
beast::Journal
A generic endpoint for log messages.
Definition: Journal.h:58
std::uint32_t
ripple::NodeStore::DatabaseShardImp::acquireIndex_
std::uint32_t acquireIndex_
Definition: DatabaseShardImp.h:243
ripple::Overlay::foreach
void foreach(Function f) const
Visit every active peer.
Definition: Overlay.h:185
std::map
STL class.
ripple::NodeStore::Scheduler
Scheduling for asynchronous backend activity.
Definition: ripple/nodestore/Scheduler.h:60
ripple::NodeStore::Database::fetchNodeObject
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq=0, FetchType fetchType=FetchType::synchronous)
Fetch a node object.
Definition: Database.cpp:158
std::transform
T transform(T... args)
ripple::NodeStore::Database::storeStats
void storeStats(std::uint64_t count, std::uint64_t sz)
Definition: Database.h:328
ripple::NodeStore::DatabaseShardImp::preparedIndexes_
std::set< std::uint32_t > preparedIndexes_
Definition: DatabaseShardImp.h:240
ripple::NodeStore::DatabaseShardImp::init
bool init() override
Initialize the database.
Definition: DatabaseShardImp.cpp:70
std::weak_ptr
STL class.
ripple::NodeStore::Database::isStopping
bool isStopping() const
Definition: Database.cpp:69
ripple::Serializer
Definition: Serializer.h:39
ripple::NodeStore::DatabaseShardImp::historicalPaths_
std::vector< boost::filesystem::path > historicalPaths_
Definition: DatabaseShardImp.h:258
std::vector::emplace_back
T emplace_back(T... args)
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::Serializer::addBitString
int addBitString(base_uint< Bits, Tag > const &v)
Definition: Serializer.h:97
ripple::Application::getNodeStore
virtual NodeStore::Database & getNodeStore()=0
ripple::NodeStore::DatabaseShardImp::checkHistoricalPaths
bool checkHistoricalPaths(std::lock_guard< std::mutex > const &) const
Definition: DatabaseShardImp.cpp:1870
ripple::NodeStore::DatabaseShardImp::maxHistoricalShards_
std::uint32_t maxHistoricalShards_
Definition: DatabaseShardImp.h:255
ripple::NodeStore::Shard::callForTransactionSQL
bool callForTransactionSQL(std::function< bool(Args... args)> const &callback)
Invoke a callback on the transaction SQLite db.
Definition: Shard.h:237
ripple::ShardState::acquire
@ acquire
ripple::Application::getShardFamily
virtual Family * getShardFamily()=0
ripple::NodeStore::Database::j_
const beast::Journal j_
Definition: Database.h:301
ripple::NodeStore::DatabaseShardImp::callForTransactionSQLByShardIndex
bool callForTransactionSQLByShardIndex(std::uint32_t const shardIndex, std::function< bool(soci::session &session)> const &callback) override
Invoke a callback on the transaction SQLite db for the corresponding shard.
Definition: DatabaseShardImp.cpp:1982
ripple::NodeStore::DatabaseShardImp::fetchLedger
std::shared_ptr< Ledger > fetchLedger(uint256 const &hash, std::uint32_t ledgerSeq) override
Fetch a ledger from the shard store.
Definition: DatabaseShardImp.cpp:552
std::vector::begin
T begin(T... args)
ripple::NodeStore::Database::seqToShardIndex
std::uint32_t seqToShardIndex(std::uint32_t ledgerSeq) const noexcept
Calculates the shard index for a given ledger sequence.
Definition: Database.h:283
std
STL namespace.
ripple::XRP_LEDGER_EARLIEST_SEQ
static constexpr std::uint32_t XRP_LEDGER_EARLIEST_SEQ
The XRP ledger network's earliest allowed sequence.
Definition: SystemParameters.h:61
ripple::NodeStore::DatabaseShardImp::iterateShardsBack
bool iterateShardsBack(std::optional< std::uint32_t > maxShardIndex, std::function< bool(Shard &shard)> const &visit)
iterateShardsBack Visits all shards starting from given in descending order and calls given callback ...
Definition: DatabaseShardImp.cpp:2048
ripple::NodeStore::DatabaseShardImp::numHistoricalShards
std::uint32_t numHistoricalShards(std::lock_guard< std::mutex > const &lock) const
Definition: DatabaseShardImp.cpp:1658
ripple::LedgerMaster::getCurrentLedgerIndex
LedgerIndex getCurrentLedgerIndex()
Definition: LedgerMaster.cpp:208
ripple::NodeStore::DatabaseShardImp::relocateOutdatedShards
void relocateOutdatedShards(std::lock_guard< std::mutex > const &lock)
Definition: DatabaseShardImp.cpp:1669
ripple::NodeStore::DatabaseShardImp::iterateLedgerSQLsBack
bool iterateLedgerSQLsBack(std::optional< std::uint32_t > maxShardIndex, std::function< bool(soci::session &session, std::uint32_t shardIndex)> const &callback) override
iterateLedgerSQLsBack Checks out ledger databases for all shards in descending order starting from gi...
Definition: DatabaseShardImp.cpp:2077
ripple::NodeStore::DatabaseShardImp::updateFileStats
void updateFileStats()
Definition: DatabaseShardImp.cpp:1469
ripple::NodeStore::Database::earliestLedgerSeq_
const std::uint32_t earliestLedgerSeq_
Definition: Database.h:322
ripple::Application::overlay
virtual Overlay & overlay()=0
ripple::NodeStore::DatabaseShardImp::shardBoundaryIndex
std::uint32_t shardBoundaryIndex() const
Definition: DatabaseShardImp.cpp:1642
std::count_if
T count_if(T... args)
std::vector::empty
T empty(T... args)
ripple::NodeStore::DatabaseShardImp::prepareShards
bool prepareShards(std::vector< std::uint32_t > const &shardIndexes) override
Prepare one or more shard indexes to be imported into the database.
Definition: DatabaseShardImp.cpp:299
std::unique
T unique(T... args)
std::optional< std::uint32_t >
beast::Journal::debug
Stream debug() const
Definition: Journal.h:315
ripple::NodeStore::Database::earliestShardIndex_
const std::uint32_t earliestShardIndex_
Definition: Database.h:325
ripple::to_string
std::string to_string(Manifest const &m)
Format the specified manifest to a string for debugging purposes.
Definition: app/misc/impl/Manifest.cpp:39
ripple::NodeStore::DatabaseShardImp::setStoredInShard
bool setStoredInShard(std::shared_ptr< Shard > &shard, std::shared_ptr< Ledger const > const &ledger)
Definition: DatabaseShardImp.cpp:1585
ripple::NodeStore::DatabaseShardImp::canAdd_
bool canAdd_
Definition: DatabaseShardImp.h:249
ripple::Serializer::add32
int add32(std::uint32_t i)
Definition: Serializer.cpp:38
std::vector::end
T end(T... args)
ripple::NodeStore::Database::scheduler_
Scheduler & scheduler_
Definition: Database.h:302
ripple::RangeSet
boost::icl::interval_set< T, std::less, ClosedInterval< T > > RangeSet
A set of closed intervals over the domain T.
Definition: RangeSet.h:69
ripple::NodeStore::Database::earliestLedgerSeq
std::uint32_t earliestLedgerSeq() const noexcept
Definition: Database.h:238
ripple::NodeStore::DatabaseShardImp::finalizeShard
void finalizeShard(std::shared_ptr< Shard > &shard, bool writeSQLite, std::optional< uint256 > const &expectedHash)
Definition: DatabaseShardImp.cpp:1395
std::max
T max(T... args)
ripple::RelationalDBInterface::getHashesByIndex
virtual std::optional< LedgerHashPair > getHashesByIndex(LedgerIndex ledgerIndex)=0
getHashesByIndex Returns hash of the ledger and hash of parent ledger for the ledger of given sequenc...
ripple::NodeStore::Shard
Definition: Shard.h:53
ripple::NodeStore::Database::maxLedgers
std::uint32_t maxLedgers(std::uint32_t shardIndex) const noexcept
Calculates the maximum ledgers for a given shard index.
Definition: Database.cpp:76
ripple::NodeStore::DatabaseShardImp::DatabaseShardImp
DatabaseShardImp()=delete
std::make_reverse_iterator
T make_reverse_iterator(T... args)
std::unique_ptr
STL class.
ripple::loadLedgerHelper
std::shared_ptr< Ledger > loadLedgerHelper(LedgerInfo const &info, Application &app, bool acquire)
Definition: Ledger.cpp:1017
ripple::NodeStore::DatabaseShardImp::databaseImportMarker_
static constexpr auto databaseImportMarker_
Definition: DatabaseShardImp.h:270
std::unordered_map
STL class.
ripple::PublisherStatus::available
@ available
ripple::NodeStore::DatabaseShardImp::prepareForNewShard
std::optional< PathDesignation > prepareForNewShard(std::uint32_t shardIndex, std::uint32_t numHistoricalShards, std::lock_guard< std::mutex > const &lock)
Definition: DatabaseShardImp.cpp:1806
ripple::ConfigSection::nodeDatabase
static std::string nodeDatabase()
Definition: ConfigSections.h:33
std::thread::join
T join(T... args)
std::exception::what
T what(T... args)
ripple::ShardState::queued
@ queued
ripple::NodeStore::DatabaseShardImp::iterateTransactionSQLsForward
bool iterateTransactionSQLsForward(std::optional< std::uint32_t > minShardIndex, std::function< bool(soci::session &session, std::uint32_t shardIndex)> const &callback) override
iterateTransactionSQLsForward Checks out transaction databases for all shards in ascending order star...
Definition: DatabaseShardImp.cpp:2036
ripple::HashPrefix::shardInfo
@ shardInfo
shard info for signing
Json::Value
Represents a JSON value.
Definition: json_value.h:145
ripple::NodeStore::DatabaseShardImp::prepareLedger
std::optional< std::uint32_t > prepareLedger(std::uint32_t validLedgerSeq) override
Prepare to store a new ledger in the shard being acquired.
Definition: DatabaseShardImp.cpp:229
ripple::NodeStore::DatabaseShardImp::ctx_
std::unique_ptr< nudb::context > ctx_
Definition: DatabaseShardImp.h:231
ripple::BasicConfig::section
Section & section(std::string const &name)
Returns the section with the given name.
Definition: BasicConfig.cpp:138
ripple::NodeStore::DatabaseShardImp::backendName_
std::string backendName_
Definition: DatabaseShardImp.h:252
ripple::NodeStore::DatabaseShardImp::getShardInfo
std::unique_ptr< ShardInfo > getShardInfo() const override
Query information about shards held.
Definition: DatabaseShardImp.cpp:692