rippled
DatabaseShardImp.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2017 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/app/ledger/InboundLedgers.h>
21 #include <ripple/app/ledger/LedgerMaster.h>
22 #include <ripple/app/misc/NetworkOPs.h>
23 #include <ripple/basics/ByteUtilities.h>
24 #include <ripple/basics/chrono.h>
25 #include <ripple/basics/random.h>
26 #include <ripple/core/ConfigSections.h>
27 #include <ripple/nodestore/DummyScheduler.h>
28 #include <ripple/nodestore/impl/DatabaseShardImp.h>
29 #include <ripple/overlay/Overlay.h>
30 #include <ripple/overlay/predicates.h>
31 #include <ripple/protocol/HashPrefix.h>
32 
33 #include <boost/algorithm/string/predicate.hpp>
34 
35 #if BOOST_OS_LINUX
36 #include <sys/statvfs.h>
37 #endif
38 
39 namespace ripple {
40 namespace NodeStore {
41 
42 DatabaseShardImp::DatabaseShardImp(
43  Application& app,
44  Stoppable& parent,
45  std::string const& name,
46  Scheduler& scheduler,
47  int readThreads,
48  beast::Journal j)
49  : DatabaseShard(
50  name,
51  parent,
52  scheduler,
53  readThreads,
54  app.config().section(ConfigSection::shardDatabase()),
55  j)
56  , app_(app)
57  , parent_(parent)
58  , taskQueue_(std::make_unique<TaskQueue>(*this))
59  , earliestShardIndex_(seqToShardIndex(earliestLedgerSeq()))
60  , avgShardFileSz_(ledgersPerShard_ * kilobytes(192ull))
61  , openFinalLimit_(
62  app.config().getValueFor(SizedItem::openFinalLimit, boost::none))
63 {
64 }
65 
66 bool
67 DatabaseShardImp::init()
68 {
69  {
70  std::lock_guard lock(mutex_);
71  if (init_)
72  {
73  JLOG(j_.error()) << "already initialized";
74  return false;
75  }
76 
77  if (!initConfig(lock))
78  {
79  JLOG(j_.error()) << "invalid configuration file settings";
80  return false;
81  }
82 
83  try
84  {
85  using namespace boost::filesystem;
86 
87  // Consolidate the main storage path and all historical paths
88  std::vector<path> paths{dir_};
89  paths.insert(
90  paths.end(), historicalPaths_.begin(), historicalPaths_.end());
91 
92  for (auto const& path : paths)
93  {
94  if (exists(path))
95  {
96  if (!is_directory(path))
97  {
98  JLOG(j_.error()) << path << " must be a directory";
99  return false;
100  }
101  }
102  else if (!create_directories(path))
103  {
104  JLOG(j_.error())
105  << "failed to create path: " + path.string();
106  return false;
107  }
108  }
109 
111  {
112  // Check historical paths for duplicated file systems
113  if (!checkHistoricalPaths())
114  return false;
115  }
116 
117  ctx_ = std::make_unique<nudb::context>();
118  ctx_->start();
119 
120  // Find shards
121  std::uint32_t openFinals{0};
122  for (auto const& path : paths)
123  {
124  for (auto const& it : directory_iterator(path))
125  {
126  // Ignore files
127  if (!is_directory(it))
128  continue;
129 
130  // Ignore nonnumerical directory names
131  auto const shardDir{it.path()};
132  auto dirName{shardDir.stem().string()};
133  if (!std::all_of(
134  dirName.begin(), dirName.end(), [](auto c) {
135  return ::isdigit(static_cast<unsigned char>(c));
136  }))
137  {
138  continue;
139  }
140 
141  // Ignore values below the earliest shard index
142  auto const shardIndex{std::stoul(dirName)};
143  if (shardIndex < earliestShardIndex())
144  {
145  JLOG(j_.debug())
146  << "shard " << shardIndex
147  << " ignored, comes before earliest shard index "
148  << earliestShardIndex();
149  continue;
150  }
151 
152  // Check if a previous import failed
153  if (is_regular_file(shardDir / importMarker_))
154  {
155  JLOG(j_.warn())
156  << "shard " << shardIndex
157  << " previously failed import, removing";
158  remove_all(shardDir);
159  continue;
160  }
161 
162  auto shard{std::make_shared<Shard>(
163  app_, *this, shardIndex, shardDir.parent_path(), j_)};
164  if (!shard->init(scheduler_, *ctx_))
165  {
166  // Remove corrupted or legacy shard
167  shard->removeOnDestroy();
168  JLOG(j_.warn())
169  << "shard " << shardIndex << " removed, "
170  << (shard->isLegacy() ? "legacy" : "corrupted")
171  << " shard";
172  continue;
173  }
174 
175  switch (shard->getState())
176  {
177  case Shard::final:
178  if (++openFinals > openFinalLimit_)
179  shard->tryClose();
180  shards_.emplace(shardIndex, std::move(shard));
181  break;
182 
183  case Shard::complete:
184  finalizeShard(
185  shards_.emplace(shardIndex, std::move(shard))
186  .first->second,
187  true,
188  boost::none);
189  break;
190 
191  case Shard::acquire:
192  if (acquireIndex_ != 0)
193  {
194  JLOG(j_.error())
195  << "more than one shard being acquired";
196  return false;
197  }
198 
199  shards_.emplace(shardIndex, std::move(shard));
200  acquireIndex_ = shardIndex;
201  break;
202 
203  default:
204  JLOG(j_.error())
205  << "shard " << shardIndex << " invalid state";
206  return false;
207  }
208  }
209  }
210  }
211  catch (std::exception const& e)
212  {
213  JLOG(j_.fatal()) << "Exception caught in function " << __func__
214  << ". Error: " << e.what();
215  return false;
216  }
217 
218  updateStatus(lock);
219  setParent(parent_);
220  init_ = true;
221  }
222 
223  setFileStats();
224  return true;
225 }
226 
227 boost::optional<std::uint32_t>
228 DatabaseShardImp::prepareLedger(std::uint32_t validLedgerSeq)
229 {
230  boost::optional<std::uint32_t> shardIndex;
231 
232  {
233  std::lock_guard lock(mutex_);
234  assert(init_);
235 
236  if (acquireIndex_ != 0)
237  {
238  if (auto const it{shards_.find(acquireIndex_)}; it != shards_.end())
239  return it->second->prepare();
240 
241  // Should never get here
242  assert(false);
243  return boost::none;
244  }
245 
246  if (!canAdd_)
247  return boost::none;
248 
249  shardIndex = findAcquireIndex(validLedgerSeq, lock);
250  }
251 
252  if (!shardIndex)
253  {
254  JLOG(j_.debug()) << "no new shards to add";
255  {
256  std::lock_guard lock(mutex_);
257  canAdd_ = false;
258  }
259  return boost::none;
260  }
261 
262  auto const pathDesignation = [this, shardIndex = *shardIndex]() {
263  std::lock_guard lock(mutex_);
264  return prepareForNewShard(shardIndex, numHistoricalShards(lock), lock);
265  }();
266 
267  if (!pathDesignation)
268  return boost::none;
269 
270  auto const needsHistoricalPath =
271  *pathDesignation == PathDesignation::historical;
272 
273  auto shard = [this, shardIndex, needsHistoricalPath] {
274  std::lock_guard lock(mutex_);
275  return std::make_unique<Shard>(
276  app_,
277  *this,
278  *shardIndex,
279  (needsHistoricalPath ? chooseHistoricalPath(lock) : ""),
280  j_);
281  }();
282 
283  if (!shard->init(scheduler_, *ctx_))
284  return boost::none;
285 
286  auto const ledgerSeq{shard->prepare()};
287  {
288  std::lock_guard lock(mutex_);
289  shards_.emplace(*shardIndex, std::move(shard));
290  acquireIndex_ = *shardIndex;
291  }
292  return ledgerSeq;
293 }
294 
295 bool
296 DatabaseShardImp::prepareShards(std::vector<std::uint32_t> const& shardIndexes)
297 {
298  auto fail = [j = j_, &shardIndexes](
299  std::string const& msg,
300  boost::optional<std::uint32_t> shardIndex = boost::none) {
301  auto multipleIndexPrequel = [&shardIndexes] {
302  std::vector<std::string> indexesAsString(shardIndexes.size());
303  std::transform(
304  shardIndexes.begin(),
305  shardIndexes.end(),
306  indexesAsString.begin(),
307  [](uint32_t const index) { return std::to_string(index); });
308 
309  return std::string("shard") +
310  (shardIndexes.size() > 1 ? "s " : " ") +
311  boost::algorithm::join(indexesAsString, ", ");
312  };
313 
314  std::string const prequel = shardIndex
315  ? "shard " + std::to_string(*shardIndex)
316  : multipleIndexPrequel();
317 
318  JLOG(j.error()) << prequel << " " << msg;
319  return false;
320  };
321 
322  std::lock_guard lock(mutex_);
323  assert(init_);
324 
325  if (!canAdd_)
326  return fail("cannot be stored at this time");
327 
328  auto historicalShardsToPrepare = 0;
329 
330  for (auto const shardIndex : shardIndexes)
331  {
332  if (shardIndex < earliestShardIndex())
333  {
334  return fail(
335  "comes before earliest shard index " +
337  shardIndex);
338  }
339 
340  // If we are synced to the network, check if the shard index is
341  // greater or equal to the current or validated shard index.
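  // (That is, an index at or beyond the shard containing the current or
  // validated ledger refers to a shard the network has not completed yet,
  // so it cannot be prepared for import.)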
342  auto seqCheck = [&](std::uint32_t ledgerSeq) {
343  if (ledgerSeq >= earliestLedgerSeq() &&
344  shardIndex >= seqToShardIndex(ledgerSeq))
345  {
346  return fail("invalid index", shardIndex);
347  }
348  return true;
349  };
350  if (!seqCheck(app_.getLedgerMaster().getValidLedgerIndex() + 1) ||
351  !seqCheck(app_.getLedgerMaster().getCurrentLedgerIndex()))
352  {
353  return fail("invalid index", shardIndex);
354  }
355 
356  if (shards_.find(shardIndex) != shards_.end())
357  return fail("is already stored", shardIndex);
358 
359  if (preparedIndexes_.find(shardIndex) != preparedIndexes_.end())
360  return fail("is already queued for import", shardIndex);
361 
362  // Any shard earlier than the two most recent shards
363  // is a historical shard
364  if (shardIndex < shardBoundaryIndex())
365  ++historicalShardsToPrepare;
366  }
367 
368  auto const numHistShards = numHistoricalShards(lock);
369 
370  // Check shard count and available storage space
371  if (numHistShards + historicalShardsToPrepare > maxHistoricalShards_)
372  return fail("maximum number of historical shards reached");
373 
374  if (historicalShardsToPrepare)
375  {
376  // Check available storage space for historical shards
377  if (!sufficientStorage(
378  historicalShardsToPrepare, PathDesignation::historical, lock))
379  return fail("insufficient storage space available");
380  }
381 
382  if (auto const recentShardsToPrepare =
383  shardIndexes.size() - historicalShardsToPrepare;
384  recentShardsToPrepare)
385  {
386  // Check available storage space for recent shards
387  if (!sufficientStorage(
388  recentShardsToPrepare, PathDesignation::none, lock))
389  return fail("insufficient storage space available");
390  }
391 
392  for (auto const shardIndex : shardIndexes)
393  {
394  auto const prepareSuccessful =
395  preparedIndexes_.emplace(shardIndex).second;
396 
397  (void)prepareSuccessful;
398  assert(prepareSuccessful);
399  }
400 
401  return true;
402 }
403 
404 void
405 DatabaseShardImp::removePreShard(std::uint32_t shardIndex)
406 {
407  std::lock_guard lock(mutex_);
408  assert(init_);
409 
410  preparedIndexes_.erase(shardIndex);
411 }
412 
413 std::string
414 DatabaseShardImp::getPreShards()
415 {
416  RangeSet<std::uint32_t> rs;
417  {
418  std::lock_guard lock(mutex_);
419  assert(init_);
420 
421  for (auto const& shardIndex : preparedIndexes_)
422  rs.insert(shardIndex);
423  }
424 
425  if (rs.empty())
426  return {};
427 
428  return to_string(rs);
429 };
430 
431 bool
432 DatabaseShardImp::importShard(
433  std::uint32_t shardIndex,
434  boost::filesystem::path const& srcDir)
435 {
436  auto fail = [&](std::string const& msg,
437  std::lock_guard<std::mutex> const& lock) {
438  JLOG(j_.error()) << "shard " << shardIndex << " " << msg;
439 
440  // Remove the failed import shard index so it can be retried
441  preparedIndexes_.erase(shardIndex);
442  return false;
443  };
444 
445  using namespace boost::filesystem;
446  try
447  {
448  if (!is_directory(srcDir) || is_empty(srcDir))
449  {
450  return fail(
451  "invalid source directory " + srcDir.string(),
453  }
454  }
455  catch (std::exception const& e)
456  {
457  return fail(
458  std::string(". Exception caught in function ") + __func__ +
459  ". Error: " + e.what(),
461  }
462 
463  auto const expectedHash{app_.getLedgerMaster().walkHashBySeq(
464  lastLedgerSeq(shardIndex), InboundLedger::Reason::GENERIC)};
465  if (!expectedHash)
466  return fail("expected hash not found", std::lock_guard(mutex_));
467 
468  path dstDir;
469  {
470  std::lock_guard lock(mutex_);
471  if (shards_.find(shardIndex) != shards_.end())
472  return fail("already exists", lock);
473 
474  // Check shard was prepared for import
475  if (preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
476  return fail("was not prepared for import", lock);
477 
478  auto const pathDesignation{
479  prepareForNewShard(shardIndex, numHistoricalShards(lock), lock)};
480  if (!pathDesignation)
481  return fail("failed to import", lock);
482 
483  if (*pathDesignation == PathDesignation::historical)
484  dstDir = chooseHistoricalPath(lock);
485  else
486  dstDir = dir_;
487  }
488  dstDir /= std::to_string(shardIndex);
489 
490  auto renameDir = [&](path const& src, path const& dst) {
491  try
492  {
493  rename(src, dst);
494  }
495  catch (std::exception const& e)
496  {
497  return fail(
498  std::string(". Exception caught in function ") + __func__ +
499  ". Error: " + e.what(),
501  }
502  return true;
503  };
504 
505  // Rename source directory to the shard database directory
506  if (!renameDir(srcDir, dstDir))
507  return false;
508 
509  // Create the new shard
510  auto shard{std::make_unique<Shard>(
511  app_, *this, shardIndex, dstDir.parent_path(), j_)};
512 
513  if (!shard->init(scheduler_, *ctx_) || shard->getState() != Shard::complete)
514  {
515  shard.reset();
516  renameDir(dstDir, srcDir);
517  return fail("failed to import", std::lock_guard(mutex_));
518  }
519 
520  auto const [it, inserted] = [&]() {
521  std::lock_guard lock(mutex_);
522  preparedIndexes_.erase(shardIndex);
523  return shards_.emplace(shardIndex, std::move(shard));
524  }();
525 
526  if (!inserted)
527  {
528  shard.reset();
529  renameDir(dstDir, srcDir);
530  return fail("failed to import", std::lock_guard(mutex_));
531  }
532 
533  finalizeShard(it->second, true, expectedHash);
534  return true;
535 }
536 
537 std::shared_ptr<Ledger>
538 DatabaseShardImp::fetchLedger(uint256 const& hash, std::uint32_t ledgerSeq)
539 {
540  auto const shardIndex{seqToShardIndex(ledgerSeq)};
541  {
542  std::shared_ptr<Shard> shard;
543  {
544  std::lock_guard lock(mutex_);
545  assert(init_);
546 
547  auto const it{shards_.find(shardIndex)};
548  if (it == shards_.end())
549  return nullptr;
550  shard = it->second;
551  }
552 
553  // Ledger must be stored in a final or acquiring shard
554  switch (shard->getState())
555  {
556  case Shard::final:
557  break;
558  case Shard::acquire:
559  if (shard->containsLedger(ledgerSeq))
560  break;
561  [[fallthrough]];
562  default:
563  return nullptr;
564  }
565  }
566 
567  auto const nodeObject{Database::fetchNodeObject(hash, ledgerSeq)};
568  if (!nodeObject)
569  return nullptr;
570 
571  auto fail = [&](std::string const& msg) -> std::shared_ptr<Ledger> {
572  JLOG(j_.error()) << "shard " << shardIndex << " " << msg;
573  return nullptr;
574  };
575 
576  auto ledger{std::make_shared<Ledger>(
577  deserializePrefixedHeader(makeSlice(nodeObject->getData())),
578  app_.config(),
579  *app_.getShardFamily())};
580 
581  if (ledger->info().seq != ledgerSeq)
582  {
583  return fail(
584  "encountered invalid ledger sequence " + std::to_string(ledgerSeq));
585  }
586  if (ledger->info().hash != hash)
587  {
588  return fail(
589  "encountered invalid ledger hash " + to_string(hash) +
590  " on sequence " + std::to_string(ledgerSeq));
591  }
592 
593  ledger->setFull();
594  if (!ledger->stateMap().fetchRoot(
595  SHAMapHash{ledger->info().accountHash}, nullptr))
596  {
597  return fail(
598  "is missing root STATE node on hash " + to_string(hash) +
599  " on sequence " + std::to_string(ledgerSeq));
600  }
601 
602  if (ledger->info().txHash.isNonZero())
603  {
604  if (!ledger->txMap().fetchRoot(
605  SHAMapHash{ledger->info().txHash}, nullptr))
606  {
607  return fail(
608  "is missing root TXN node on hash " + to_string(hash) +
609  " on sequence " + std::to_string(ledgerSeq));
610  }
611  }
612  return ledger;
613 }
614 
615 void
616 DatabaseShardImp::setStored(std::shared_ptr<Ledger const> const& ledger)
617 {
618  auto const ledgerSeq{ledger->info().seq};
619  if (ledger->info().hash.isZero())
620  {
621  JLOG(j_.error()) << "zero ledger hash for ledger sequence "
622  << ledgerSeq;
623  return;
624  }
625  if (ledger->info().accountHash.isZero())
626  {
627  JLOG(j_.error()) << "zero account hash for ledger sequence "
628  << ledgerSeq;
629  return;
630  }
631  if (ledger->stateMap().getHash().isNonZero() &&
632  !ledger->stateMap().isValid())
633  {
634  JLOG(j_.error()) << "invalid state map for ledger sequence "
635  << ledgerSeq;
636  return;
637  }
638  if (ledger->info().txHash.isNonZero() && !ledger->txMap().isValid())
639  {
640  JLOG(j_.error()) << "invalid transaction map for ledger sequence "
641  << ledgerSeq;
642  return;
643  }
644 
645  auto const shardIndex{seqToShardIndex(ledgerSeq)};
646  std::shared_ptr<Shard> shard;
647  {
648  std::lock_guard lock(mutex_);
649  assert(init_);
650 
651  if (shardIndex != acquireIndex_)
652  {
653  JLOG(j_.trace())
654  << "shard " << shardIndex << " is not being acquired";
655  return;
656  }
657 
658  auto const it{shards_.find(shardIndex)};
659  if (it == shards_.end())
660  {
661  JLOG(j_.error())
662  << "shard " << shardIndex << " is not being acquired";
663  return;
664  }
665  shard = it->second;
666  }
667 
668  if (shard->containsLedger(ledgerSeq))
669  {
670  JLOG(j_.trace()) << "shard " << shardIndex << " ledger already stored";
671  return;
672  }
673 
674  setStoredInShard(shard, ledger);
675 }
676 
677 std::string
678 DatabaseShardImp::getCompleteShards()
679 {
680  std::lock_guard lock(mutex_);
681  assert(init_);
682 
683  return status_;
684 }
685 
686 void
687 DatabaseShardImp::onStop()
688 {
689  // Stop read threads in base before data members are destroyed
690  stopReadThreads();
691 
692  std::lock_guard lock(mutex_);
693 
694  // Notify shards to stop
695  for (auto const& e : shards_)
696  e.second->stop();
697 }
698 
699 void
700 DatabaseShardImp::onChildrenStopped()
701 {
702  std::vector<std::weak_ptr<Shard>> shards;
703  {
704  std::lock_guard lock(mutex_);
705 
706  shards.reserve(shards_.size());
707  for (auto const& e : shards_)
708  shards.push_back(e.second);
709  shards_.clear();
710  }
711 
712  // All shards should be expired at this point
713  for (auto const& e : shards)
714  {
715  if (!e.expired())
716  {
717  std::string shardIndex;
718  if (auto const shard{e.lock()}; shard)
719  shardIndex = std::to_string(shard->index());
720 
721  JLOG(j_.warn()) << " shard " << shardIndex << " unexpired";
722  }
723  }
724 
725  if (areChildrenStopped())
726  stopped();
727  else
728  {
729  JLOG(j_.warn()) << " Children failed to stop";
730  }
731 }
732 
733 void
734 DatabaseShardImp::import(Database& source)
735 {
736  {
737  std::lock_guard lock(mutex_);
738  assert(init_);
739 
740  // Only the application local node store can be imported
741  if (&source != &app_.getNodeStore())
742  {
743  assert(false);
744  JLOG(j_.error()) << "invalid source database";
745  return;
746  }
747 
748  std::uint32_t earliestIndex;
749  std::uint32_t latestIndex;
750  {
751  auto loadLedger = [&](bool ascendSort =
752  true) -> boost::optional<std::uint32_t> {
753  std::shared_ptr<Ledger> ledger;
754  std::uint32_t ledgerSeq;
755  std::tie(ledger, ledgerSeq, std::ignore) = loadLedgerHelper(
756  "WHERE LedgerSeq >= " +
758  " order by LedgerSeq " + (ascendSort ? "asc" : "desc") +
759  " limit 1",
760  app_,
761  false);
762  if (!ledger || ledgerSeq == 0)
763  {
764  JLOG(j_.error()) << "no suitable ledgers were found in"
765  " the SQLite database to import";
766  return boost::none;
767  }
768  return ledgerSeq;
769  };
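  // As a sketch of the generated SQL (assuming the default earliest
  // ledger sequence of 32570), the ascending query would read:
  //   WHERE LedgerSeq >= 32570 order by LedgerSeq asc limit 1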
770 
771  // Find earliest ledger sequence stored
772  auto ledgerSeq{loadLedger()};
773  if (!ledgerSeq)
774  return;
775  earliestIndex = seqToShardIndex(*ledgerSeq);
776 
777  // Consider only complete shards
778  if (ledgerSeq != firstLedgerSeq(earliestIndex))
779  ++earliestIndex;
780 
781  // Find last ledger sequence stored
782  ledgerSeq = loadLedger(false);
783  if (!ledgerSeq)
784  return;
785  latestIndex = seqToShardIndex(*ledgerSeq);
786 
787  // Consider only complete shards
788  if (ledgerSeq != lastLedgerSeq(latestIndex))
789  --latestIndex;
790 
791  if (latestIndex < earliestIndex)
792  {
793  JLOG(j_.error()) << "no suitable ledgers were found in"
794  " the SQLite database to import";
795  return;
796  }
797  }
798 
799  auto numHistShards = this->numHistoricalShards(lock);
800 
801  // Import the shards
802  for (std::uint32_t shardIndex = earliestIndex;
803  shardIndex <= latestIndex;
804  ++shardIndex)
805  {
806  auto const pathDesignation =
807  prepareForNewShard(shardIndex, numHistShards, lock);
808 
809  if (!pathDesignation)
810  break;
811 
812  auto const needsHistoricalPath =
813  *pathDesignation == PathDesignation::historical;
814 
815  // Skip if being acquired
816  if (shardIndex == acquireIndex_)
817  {
818  JLOG(j_.debug())
819  << "shard " << shardIndex << " already being acquired";
820  continue;
821  }
822 
823  // Skip if being imported
824  if (preparedIndexes_.find(shardIndex) != preparedIndexes_.end())
825  {
826  JLOG(j_.debug())
827  << "shard " << shardIndex << " already being imported";
828  continue;
829  }
830 
831  // Skip if stored
832  if (shards_.find(shardIndex) != shards_.end())
833  {
834  JLOG(j_.debug()) << "shard " << shardIndex << " already stored";
835  continue;
836  }
837 
838  // Verify SQLite ledgers are in the node store
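  // (Fetching every ledger would be costly, so the loop below only
  // spot-checks one ledger out of every 256.)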
839  {
840  auto const firstSeq{firstLedgerSeq(shardIndex)};
841  auto const lastSeq{
842  std::max(firstSeq, lastLedgerSeq(shardIndex))};
843  auto const numLedgers{
844  shardIndex == earliestShardIndex() ? lastSeq - firstSeq + 1
845  : ledgersPerShard_};
846  auto ledgerHashes{getHashesByIndex(firstSeq, lastSeq, app_)};
847  if (ledgerHashes.size() != numLedgers)
848  continue;
849 
850  bool valid{true};
851  for (std::uint32_t n = firstSeq; n <= lastSeq; n += 256)
852  {
853  if (!source.fetchNodeObject(ledgerHashes[n].first, n))
854  {
855  JLOG(j_.warn()) << "SQLite ledger sequence " << n
856  << " mismatches node store";
857  valid = false;
858  break;
859  }
860  }
861  if (!valid)
862  continue;
863  }
864 
865  auto const path =
866  needsHistoricalPath ? chooseHistoricalPath(lock) : dir_;
867 
868  // Create the new shard
869  auto shard{
870  std::make_unique<Shard>(app_, *this, shardIndex, path, j_)};
871  if (!shard->init(scheduler_, *ctx_))
872  continue;
873 
874  // Create a marker file to signify an import in progress
875  auto const shardDir{path / std::to_string(shardIndex)};
876  auto const markerFile{shardDir / importMarker_};
877  {
878  std::ofstream ofs{markerFile.string()};
879  if (!ofs.is_open())
880  {
881  JLOG(j_.error()) << "shard " << shardIndex
882  << " failed to create temp marker file";
883  shard->removeOnDestroy();
884  continue;
885  }
886  ofs.close();
887  }
888 
889  // Copy the ledgers from node store
890  std::shared_ptr<Ledger> recentStored;
891  boost::optional<uint256> lastLedgerHash;
892 
893  while (auto const ledgerSeq = shard->prepare())
894  {
895  auto ledger{loadByIndex(*ledgerSeq, app_, false)};
896  if (!ledger || ledger->info().seq != ledgerSeq)
897  break;
898 
899  auto const result{shard->storeLedger(ledger, recentStored)};
900  storeStats(result.count, result.size);
901  if (result.error)
902  break;
903 
904  if (!shard->setLedgerStored(ledger))
905  break;
906 
907  if (!lastLedgerHash && ledgerSeq == lastLedgerSeq(shardIndex))
908  lastLedgerHash = ledger->info().hash;
909 
910  recentStored = std::move(ledger);
911  }
912 
913  using namespace boost::filesystem;
914  bool success{false};
915  if (lastLedgerHash && shard->getState() == Shard::complete)
916  {
917  // Store shard final key
918  Serializer s;
919  s.add32(Shard::version);
920  s.add32(firstLedgerSeq(shardIndex));
921  s.add32(lastLedgerSeq(shardIndex));
922  s.addBitString(*lastLedgerHash);
923  auto const nodeObject{NodeObject::createObject(
924  hotUNKNOWN, std::move(s.modData()), Shard::finalKey)};
925 
926  if (shard->storeNodeObject(nodeObject))
927  {
928  try
929  {
930  // The import process is complete and the
931  // marker file is no longer required
932  remove_all(markerFile);
933 
934  JLOG(j_.debug()) << "shard " << shardIndex
935  << " was successfully imported";
937  shards_.emplace(shardIndex, std::move(shard))
938  .first->second,
939  true,
940  boost::none);
941  success = true;
942 
943  if (shardIndex < shardBoundaryIndex())
944  ++numHistShards;
945  }
946  catch (std::exception const& e)
947  {
948  JLOG(j_.fatal()) << "shard index " << shardIndex
949  << ". Exception caught in function "
950  << __func__ << ". Error: " << e.what();
951  }
952  }
953  }
954 
955  if (!success)
956  {
957  JLOG(j_.error())
958  << "shard " << shardIndex << " failed to import";
959  shard->removeOnDestroy();
960  }
961  }
962 
963  updateStatus(lock);
964  }
965 
966  setFileStats();
967 }
968 
969 std::int32_t
970 DatabaseShardImp::getWriteLoad() const
971 {
972  std::shared_ptr<Shard> shard;
973  {
974  std::lock_guard lock(mutex_);
975  assert(init_);
976 
977  auto const it{shards_.find(acquireIndex_)};
978  if (it == shards_.end())
979  return 0;
980  shard = it->second;
981  }
982 
983  return shard->getWriteLoad();
984 }
985 
986 void
987 DatabaseShardImp::store(
988  NodeObjectType type,
989  Blob&& data,
990  uint256 const& hash,
991  std::uint32_t ledgerSeq)
992 {
993  auto const shardIndex{seqToShardIndex(ledgerSeq)};
994  std::shared_ptr<Shard> shard;
995  {
996  std::lock_guard lock(mutex_);
997  if (shardIndex != acquireIndex_)
998  {
999  JLOG(j_.trace())
1000  << "shard " << shardIndex << " is not being acquired";
1001  return;
1002  }
1003 
1004  auto const it{shards_.find(shardIndex)};
1005  if (it == shards_.end())
1006  {
1007  JLOG(j_.error())
1008  << "shard " << shardIndex << " is not being acquired";
1009  return;
1010  }
1011  shard = it->second;
1012  }
1013 
1014  auto const nodeObject{
1015  NodeObject::createObject(type, std::move(data), hash)};
1016  if (shard->storeNodeObject(nodeObject))
1017  storeStats(1, nodeObject->getData().size());
1018 }
1019 
1020 bool
1021 DatabaseShardImp::asyncFetch(
1022  uint256 const& hash,
1023  std::uint32_t ledgerSeq,
1024  std::shared_ptr<NodeObject>& nodeObject)
1025 {
1026  std::shared_ptr<Shard> shard;
1027  {
1028  std::lock_guard lock(mutex_);
1029  assert(init_);
1030 
1031  auto const it{shards_.find(acquireIndex_)};
1032  if (it == shards_.end())
1033  return false;
1034  shard = it->second;
1035  }
1036 
1037  if (shard->fetchNodeObjectFromCache(hash, nodeObject))
1038  return true;
1039 
1040  // Otherwise post a read
1041  Database::asyncFetch(hash, ledgerSeq);
1042  return false;
1043 }
1044 
1045 bool
1046 DatabaseShardImp::storeLedger(std::shared_ptr<Ledger const> const& srcLedger)
1047 {
1048  auto const ledgerSeq{srcLedger->info().seq};
1049  auto const shardIndex{seqToShardIndex(ledgerSeq)};
1050  std::shared_ptr<Shard> shard;
1051  {
1052  std::lock_guard lock(mutex_);
1053  assert(init_);
1054 
1055  if (shardIndex != acquireIndex_)
1056  {
1057  JLOG(j_.trace())
1058  << "shard " << shardIndex << " is not being acquired";
1059  return false;
1060  }
1061 
1062  auto const it{shards_.find(shardIndex)};
1063  if (it == shards_.end())
1064  {
1065  JLOG(j_.error())
1066  << "shard " << shardIndex << " is not being acquired";
1067  return false;
1068  }
1069  shard = it->second;
1070  }
1071 
1072  auto const result{shard->storeLedger(srcLedger, nullptr)};
1073  storeStats(result.count, result.size);
1074  if (result.error || result.count == 0 || result.size == 0)
1075  return false;
1076 
1077  return setStoredInShard(shard, srcLedger);
1078 }
1079 
1080 int
1081 DatabaseShardImp::getDesiredAsyncReadCount(std::uint32_t ledgerSeq)
1082 {
1083  auto const shardIndex{seqToShardIndex(ledgerSeq)};
1084  std::shared_ptr<Shard> shard;
1085  {
1086  std::lock_guard lock(mutex_);
1087  assert(init_);
1088 
1089  auto const it{shards_.find(shardIndex)};
1090  if (it == shards_.end())
1091  return 0;
1092  shard = it->second;
1093  }
1094 
1095  return shard->getDesiredAsyncReadCount();
1096 }
1097 
1098 float
1099 DatabaseShardImp::getCacheHitRate()
1100 {
1101  std::shared_ptr<Shard> shard;
1102  {
1103  std::lock_guard lock(mutex_);
1104  assert(init_);
1105 
1106  auto const it{shards_.find(acquireIndex_)};
1107  if (it == shards_.end())
1108  return 0;
1109  shard = it->second;
1110  }
1111 
1112  return shard->getCacheHitRate();
1113 }
1114 
1115 void
1116 DatabaseShardImp::sweep()
1117 {
1118  std::vector<std::weak_ptr<Shard>> shards;
1119  {
1120  std::lock_guard lock(mutex_);
1121  assert(init_);
1122 
1123  shards.reserve(shards_.size());
1124  for (auto const& e : shards_)
1125  shards.push_back(e.second);
1126  }
1127 
1128  std::vector<std::shared_ptr<Shard>> openFinals;
1129  openFinals.reserve(openFinalLimit_);
1130 
1131  for (auto const& e : shards)
1132  {
1133  if (auto const shard{e.lock()}; shard && shard->isOpen())
1134  {
1135  shard->sweep();
1136 
1137  if (shard->getState() == Shard::final)
1138  openFinals.emplace_back(std::move(shard));
1139  }
1140  }
1141 
1142  if (openFinals.size() > openFinalLimit_)
1143  {
1144  JLOG(j_.trace()) << "Open shards exceed configured limit of "
1145  << openFinalLimit_ << " by "
1146  << (openFinals.size() - openFinalLimit_);
1147 
1148  // Try to close enough shards to be within the limit.
1149  // Sort ascending on last use so the oldest are removed first.
1150  std::sort(
1151  openFinals.begin(),
1152  openFinals.end(),
1153  [&](std::shared_ptr<Shard> const& lhsShard,
1154  std::shared_ptr<Shard> const& rhsShard) {
1155  return lhsShard->getLastUse() < rhsShard->getLastUse();
1156  });
1157 
1158  for (auto it{openFinals.cbegin()};
1159  it != openFinals.cend() && openFinals.size() > openFinalLimit_;)
1160  {
1161  if ((*it)->tryClose())
1162  it = openFinals.erase(it);
1163  else
1164  ++it;
1165  }
1166  }
1167 }
1168 
1169 bool
1170 DatabaseShardImp::initConfig(std::lock_guard<std::mutex> const&)
1171 {
1172  auto fail = [j = j_](std::string const& msg) {
1173  JLOG(j.error()) << "[" << ConfigSection::shardDatabase() << "] " << msg;
1174  return false;
1175  };
1176 
1177  Config const& config{app_.config()};
1178  Section const& section{config.section(ConfigSection::shardDatabase())};
1179 
1180  {
1181  // The earliest ledger sequence defaults to XRP_LEDGER_EARLIEST_SEQ.
1182  // A custom earliest ledger sequence can be set through the
1183  // configuration file using the 'earliest_seq' field under the
1184  // 'node_db' and 'shard_db' stanzas. If specified, this field must
1185  // have a value greater than zero and be equally assigned in
1186  // both stanzas.
1187 
1188  std::uint32_t shardDBEarliestSeq{0};
1189  get_if_exists<std::uint32_t>(
1190  section, "earliest_seq", shardDBEarliestSeq);
1191 
1192  std::uint32_t nodeDBEarliestSeq{0};
1193  get_if_exists<std::uint32_t>(
1194  config.section(ConfigSection::nodeDatabase()),
1195  "earliest_seq",
1196  nodeDBEarliestSeq);
1197 
1198  if (shardDBEarliestSeq != nodeDBEarliestSeq)
1199  {
1200  return fail(
1201  "and [" + ConfigSection::nodeDatabase() +
1202  "] define different 'earliest_seq' values");
1203  }
1204  }
1205 
1206  using namespace boost::filesystem;
1207  if (!get_if_exists<path>(section, "path", dir_))
1208  return fail("'path' missing");
1209 
1210  {
1211  get_if_exists(section, "max_historical_shards", maxHistoricalShards_);
1212 
1213  Section const& historicalShardPaths =
1214  config.section(SECTION_HISTORICAL_SHARD_PATHS);
1215 
1216  auto values = historicalShardPaths.values();
1217 
1218  std::sort(values.begin(), values.end());
1219  values.erase(std::unique(values.begin(), values.end()), values.end());
1220 
1221  for (auto const& s : values)
1222  {
1223  auto const dir = path(s);
1224  if (dir_ == dir)
1225  {
1226  return fail(
1227  "the 'path' cannot also be in the "
1228  "'historical_shard_path' section");
1229  }
1230 
1231  historicalPaths_.push_back(dir);
1232  }
1233  }
1234 
1235  if (section.exists("ledgers_per_shard"))
1236  {
1237  // To be set only in standalone for testing
1238  if (!config.standalone())
1239  return fail("'ledgers_per_shard' only honored in stand alone");
1240 
1241  ledgersPerShard_ = get<std::uint32_t>(section, "ledgers_per_shard");
1242  if (ledgersPerShard_ == 0 || ledgersPerShard_ % 256 != 0)
1243  return fail("'ledgers_per_shard' must be a multiple of 256");
1244 
1245  earliestShardIndex_ = seqToShardIndex(earliestLedgerSeq());
1246  avgShardFileSz_ = ledgersPerShard_ * kilobytes(192ull);
1247  }
1248 
1249  // NuDB is the default and only supported permanent storage backend
1250  backendName_ = get<std::string>(section, "type", "nudb");
1251  if (!boost::iequals(backendName_, "NuDB"))
1252  return fail("'type' value unsupported");
1253 
1254  return true;
1255 }
1256 
1257 std::shared_ptr<NodeObject>
1258 DatabaseShardImp::fetchNodeObject(
1259  uint256 const& hash,
1260  std::uint32_t ledgerSeq,
1261  FetchReport& fetchReport)
1262 {
1263  auto const shardIndex{seqToShardIndex(ledgerSeq)};
1264  std::shared_ptr<Shard> shard;
1265  {
1266  std::lock_guard lock(mutex_);
1267  auto const it{shards_.find(shardIndex)};
1268  if (it == shards_.end())
1269  return nullptr;
1270  shard = it->second;
1271  }
1272 
1273  return shard->fetchNodeObject(hash, fetchReport);
1274 }
1275 
1276 boost::optional<std::uint32_t>
1277 DatabaseShardImp::findAcquireIndex(
1278  std::uint32_t validLedgerSeq,
1279  std::lock_guard<std::mutex> const&)
1280 {
1281  if (validLedgerSeq < earliestLedgerSeq())
1282  return boost::none;
1283 
1284  auto const maxShardIndex{[this, validLedgerSeq]() {
1285  auto shardIndex{seqToShardIndex(validLedgerSeq)};
1286  if (validLedgerSeq != lastLedgerSeq(shardIndex))
1287  --shardIndex;
1288  return shardIndex;
1289  }()};
1290  auto const maxNumShards{maxShardIndex - earliestShardIndex() + 1};
1291 
1292  // Check if the shard store has all shards
1293  if (shards_.size() >= maxNumShards)
1294  return boost::none;
1295 
1296  if (maxShardIndex < 1024 ||
1297  static_cast<float>(shards_.size()) / maxNumShards > 0.5f)
1298  {
1299  // Small or mostly full index space to sample
1300  // Find the available indexes and select one at random
1301  std::vector<std::uint32_t> available;
1302  available.reserve(maxNumShards - shards_.size());
1303 
1304  for (auto shardIndex = earliestShardIndex();
1305  shardIndex <= maxShardIndex;
1306  ++shardIndex)
1307  {
1308  if (shards_.find(shardIndex) == shards_.end() &&
1309  preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
1310  {
1311  available.push_back(shardIndex);
1312  }
1313  }
1314 
1315  if (available.empty())
1316  return boost::none;
1317 
1318  if (available.size() == 1)
1319  return available.front();
1320 
1321  return available[rand_int(
1322  0u, static_cast<std::uint32_t>(available.size() - 1))];
1323  }
1324 
1325  // Large, sparse index space to sample
1326  // Keep choosing indexes at random until an available one is found
1327  // the chance of needing more than 30 attempts is less than 1 in a billion
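  // (Sketch of the arithmetic: this branch runs only when at most half the
  // index space is occupied, so each probe fails with probability of at
  // most about 1/2, ignoring any indexes queued for import; 30 consecutive
  // failures then occur with probability of at most 2^-30, about 9e-10.)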
1328  for (int i = 0; i < 40; ++i)
1329  {
1330  auto const shardIndex{rand_int(earliestShardIndex(), maxShardIndex)};
1331  if (shards_.find(shardIndex) == shards_.end() &&
1332  preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
1333  {
1334  return shardIndex;
1335  }
1336  }
1337 
1338  assert(false);
1339  return boost::none;
1340 }
1341 
1342 void
1343 DatabaseShardImp::finalizeShard(
1344  std::shared_ptr<Shard>& shard,
1345  bool writeSQLite,
1346  boost::optional<uint256> const& expectedHash)
1347 {
1348  taskQueue_->addTask([this,
1349  wptr = std::weak_ptr<Shard>(shard),
1350  writeSQLite,
1351  expectedHash]() {
1352  if (isStopping())
1353  return;
1354 
1355  auto shard{wptr.lock()};
1356  if (!shard)
1357  {
1358  JLOG(j_.debug()) << "Shard removed before being finalized";
1359  return;
1360  }
1361 
1362  if (!shard->finalize(writeSQLite, expectedHash))
1363  {
1364  if (isStopping())
1365  return;
1366 
1367  // Invalid or corrupt shard, remove it
1368  removeFailedShard(shard);
1369  return;
1370  }
1371 
1372  if (isStopping())
1373  return;
1374 
1375  {
1376  auto const boundaryIndex{shardBoundaryIndex()};
1377 
1378  std::lock_guard lock(mutex_);
1379  updateStatus(lock);
1380 
1381  if (shard->index() < boundaryIndex)
1382  {
1383  // This is a historical shard
1384  if (!historicalPaths_.empty() &&
1385  shard->getDir().parent_path() == dir_)
1386  {
1387  // Shard wasn't placed at a separate historical path
1388  JLOG(j_.warn()) << "shard " << shard->index()
1389  << " is not stored at a historical path";
1390  }
1391  }
1392 
1393  else
1394  {
1395  // Not a historical shard. Shift recent shards if necessary
1396  relocateOutdatedShards(lock);
1397  assert(!boundaryIndex || shard->index() - boundaryIndex <= 1);
1398 
1399  auto& recentShard = shard->index() == boundaryIndex
1402 
1403  // Set the appropriate recent shard index
1404  recentShard = shard->index();
1405 
1406  if (shard->getDir().parent_path() != dir_)
1407  {
1408  JLOG(j_.warn()) << "shard " << shard->index()
1409  << " is not stored at the path";
1410  }
1411  }
1412  }
1413 
1414  setFileStats();
1415 
1416  // Update peers with new shard index
1417  if (!app_.config().standalone() &&
1418  app_.getOPs().getOperatingMode() != OperatingMode::DISCONNECTED)
1419  {
1420  protocol::TMPeerShardInfo message;
1421  PublicKey const& publicKey{app_.nodeIdentity().first};
1422  message.set_nodepubkey(publicKey.data(), publicKey.size());
1423  message.set_shardindexes(std::to_string(shard->index()));
1424  app_.overlay().foreach(send_always(std::make_shared<Message>(
1425  message, protocol::mtPEER_SHARD_INFO)));
1426  }
1427  });
1428 }
1429 
1430 void
1431 DatabaseShardImp::setFileStats()
1432 {
1433  std::vector<std::weak_ptr<Shard>> shards;
1434  {
1435  std::lock_guard lock(mutex_);
1436  if (shards_.empty())
1437  return;
1438 
1439  shards.reserve(shards_.size());
1440  for (auto const& e : shards_)
1441  shards.push_back(e.second);
1442  }
1443 
1444  std::uint64_t sumSz{0};
1445  std::uint32_t sumFd{0};
1446  std::uint32_t numShards{0};
1447  for (auto const& e : shards)
1448  {
1449  if (auto const shard{e.lock()}; shard)
1450  {
1451  auto const [sz, fd] = shard->getFileInfo();
1452  sumSz += sz;
1453  sumFd += fd;
1454  ++numShards;
1455  }
1456  }
1457 
1458  std::lock_guard lock(mutex_);
1459  fileSz_ = sumSz;
1460  fdRequired_ = sumFd;
1461  avgShardFileSz_ = (numShards == 0 ? fileSz_ : fileSz_ / numShards);
1462 
1463  if (!canAdd_)
1464  return;
1465 
1466  if (auto const count = numHistoricalShards(lock);
1467  count >= maxHistoricalShards_)
1468  {
1469  if (maxHistoricalShards_)
1470  {
1471  // In order to avoid excessive output, don't produce
1472  // this warning if the server isn't configured to
1473  // store historical shards.
1474  JLOG(j_.warn()) << "maximum number of historical shards reached";
1475  }
1476 
1477  canAdd_ = false;
1478  }
1479  else if (!sufficientStorage(
1480  maxHistoricalShards_ - count,
1481  PathDesignation::historical,
1482  lock))
1483  {
1484  JLOG(j_.warn())
1485  << "maximum shard store size exceeds available storage space";
1486 
1487  canAdd_ = false;
1488  }
1489 }
1490 
1491 void
1491 DatabaseShardImp::updateStatus(std::lock_guard<std::mutex> const&)
1493 {
1494  if (!shards_.empty())
1495  {
1496  RangeSet<std::uint32_t> rs;
1497  for (auto const& e : shards_)
1498  if (e.second->getState() == Shard::final)
1499  rs.insert(e.second->index());
1500  status_ = to_string(rs);
1501  }
1502  else
1503  status_.clear();
1504 }
1505 
1506 bool
1507 DatabaseShardImp::sufficientStorage(
1508  std::uint32_t numShards,
1509  PathDesignation pathDesignation,
1510  std::lock_guard<std::mutex> const&) const
1511 {
1512  try
1513  {
1514  std::vector<std::uint64_t> capacities;
1515 
1516  if (pathDesignation == PathDesignation::historical &&
1517  !historicalPaths_.empty())
1518  {
1519  capacities.reserve(historicalPaths_.size());
1520 
1521  for (auto const& path : historicalPaths_)
1522  {
1523  // Get the available storage for each historical path
1524  auto const availableSpace =
1525  boost::filesystem::space(path).available;
1526 
1527  capacities.push_back(availableSpace);
1528  }
1529  }
1530  else
1531  {
1532  // Get the available storage for the main shard path
1533  capacities.push_back(boost::filesystem::space(dir_).available);
1534  }
1535 
1536  for (std::uint64_t const capacity : capacities)
1537  {
1538  // Leverage all the historical shard paths to
1539  // see if collectively they can fit the specified
1540  // number of shards. For this to work properly,
1541  // each historical path must correspond to a separate
1542  // physical device or filesystem.
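  // As a worked example (assuming the default of 16384 ledgers per
  // shard): avgShardFileSz_ starts at 16384 * 192 KB, about 3 GiB, so a
  // path with 10 GiB available contributes a capacity of 3 shards here.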
1543 
1544  auto const shardCap = capacity / avgShardFileSz_;
1545  if (numShards <= shardCap)
1546  return true;
1547 
1548  numShards -= shardCap;
1549  }
1550  }
1551  catch (std::exception const& e)
1552  {
1553  JLOG(j_.fatal()) << "Exception caught in function " << __func__
1554  << ". Error: " << e.what();
1555  return false;
1556  }
1557 
1558  return false;
1559 }
1560 
1561 bool
1562 DatabaseShardImp::setStoredInShard(
1563  std::shared_ptr<Shard>& shard,
1564  std::shared_ptr<Ledger const> const& ledger)
1565 {
1566  if (!shard->setLedgerStored(ledger))
1567  {
1568  // Invalid or corrupt shard, remove it
1569  removeFailedShard(shard);
1570  return false;
1571  }
1572 
1573  if (shard->getState() == Shard::complete)
1574  {
1575  std::lock_guard lock(mutex_);
1576  if (auto const it{shards_.find(shard->index())}; it != shards_.end())
1577  {
1578  if (shard->index() == acquireIndex_)
1579  acquireIndex_ = 0;
1580 
1581  finalizeShard(it->second, false, boost::none);
1582  }
1583  else
1584  {
1585  JLOG(j_.debug())
1586  << "shard " << shard->index() << " is no longer being acquired";
1587  }
1588  }
1589 
1590  setFileStats();
1591  return true;
1592 }
1593 
1594 void
1595 DatabaseShardImp::removeFailedShard(std::shared_ptr<Shard>& shard)
1596 {
1597  {
1598  std::lock_guard lock(mutex_);
1599 
1600  if (shard->index() == acquireIndex_)
1601  acquireIndex_ = 0;
1602 
1603  if (shard->index() == latestShardIndex_)
1604  latestShardIndex_ = boost::none;
1605 
1606  if (shard->index() == secondLatestShardIndex_)
1607  secondLatestShardIndex_ = boost::none;
1608 
1609  if ((shards_.erase(shard->index()) > 0) &&
1610  shard->getState() == Shard::final)
1611  {
1612  updateStatus(lock);
1613  }
1614  }
1615 
1616  shard->removeOnDestroy();
1617 
1618  // Reset the shared_ptr to invoke the shard's
1619  // destructor and remove it from the server
1620  shard.reset();
1621  setFileStats();
1622 }
1623 
1624 std::uint32_t
1625 DatabaseShardImp::shardBoundaryIndex() const
1626 {
1627  auto const validIndex = app_.getLedgerMaster().getValidLedgerIndex();
1628 
1629  if (validIndex < earliestLedgerSeq())
1630  return 0;
1631 
1632  // Shards with an index earlier than the recent shard boundary index
1633  // are considered historical. The three shards at or later than
1634  // this index consist of the two most recently validated shards
1635  // and the shard still in the process of being built by live
1636  // transactions.
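  // For example, if the last validated ledger falls in shard index N, the
  // boundary is N - 1: shards N (still filling) and N - 1 are recent,
  // while any index below N - 1 is historical.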
1637  return seqToShardIndex(validIndex) - 1;
1638 }
1639 
1642  std::lock_guard<std::mutex> const& lock) const
1643 {
1644  auto const boundaryIndex{shardBoundaryIndex()};
1645  return std::count_if(
1646  shards_.begin(), shards_.end(), [boundaryIndex](auto const& entry) {
1647  return entry.first < boundaryIndex;
1648  });
1649 }
1650 
1651 void
1652 DatabaseShardImp::relocateOutdatedShards(
1653  std::lock_guard<std::mutex> const& lock)
1654 {
1655  if (auto& cur = latestShardIndex_, &prev = secondLatestShardIndex_;
1656  cur || prev)
1657  {
1658  auto const latestShardIndex =
1659  seqToShardIndex(app_.getLedgerMaster().getValidLedgerIndex());
1660 
1660 
1661  auto const separateHistoricalPath = !historicalPaths_.empty();
1662 
1663  auto const removeShard =
1664  [this](std::uint32_t const shardIndex) -> void {
1665  canAdd_ = false;
1666 
1667  if (auto it = shards_.find(shardIndex); it != shards_.end())
1668  {
1669  if (it->second)
1670  removeFailedShard(it->second);
1671  else
1672  {
1673  JLOG(j_.warn()) << "can't find shard to remove";
1674  }
1675  }
1676  else
1677  {
1678  JLOG(j_.warn()) << "can't find shard to remove";
1679  }
1680  };
1681 
1682  auto const keepShard =
1683  [this, &lock, removeShard, separateHistoricalPath](
1684  std::uint32_t const shardIndex) -> bool {
1685  if (numHistoricalShards(lock) >= maxHistoricalShards_)
1686  {
1687  JLOG(j_.error())
1688  << "maximum number of historical shards reached";
1689 
1690  removeShard(shardIndex);
1691  return false;
1692  }
1693  if (separateHistoricalPath &&
1694  !sufficientStorage(1, PathDesignation::historical, lock))
1695  {
1696  JLOG(j_.error()) << "insufficient storage space available";
1697 
1698  removeShard(shardIndex);
1699  return false;
1700  }
1701 
1702  return true;
1703  };
1704 
1705  // Move a shard from the main shard path to a historical shard
1706  // path by copying the contents, and creating a new shard.
1707  auto const moveShard = [this,
1708  &lock](std::uint32_t const shardIndex) -> void {
1709  auto const dst = chooseHistoricalPath(lock);
1710 
1711  if (auto it = shards_.find(shardIndex); it != shards_.end())
1712  {
1713  auto& shard{it->second};
1714 
1715  // Close any open file descriptors before moving the shard
1716  // directory. Don't call removeOnDestroy since that would
1717  // attempt to close the fds after the directory has been moved.
1718  if (!shard->tryClose())
1719  {
1720  JLOG(j_.warn())
1721  << "can't close shard to move to historical path";
1722  return;
1723  }
1724 
1725  try
1726  {
1727  // Move the shard directory to the new path
1728  boost::filesystem::rename(
1729  shard->getDir().string(),
1730  dst / std::to_string(shardIndex));
1731  }
1732  catch (...)
1733  {
1734  JLOG(j_.error()) << "shard " << shardIndex
1735  << " failed to move to historical storage";
1736  return;
1737  }
1738 
1739  // Create a shard instance at the new location
1740  shard =
1741  std::make_shared<Shard>(app_, *this, shardIndex, dst, j_);
1742 
1743  // Open the new shard
1744  if (!shard->init(scheduler_, *ctx_))
1745  {
1746  JLOG(j_.error()) << "shard " << shardIndex
1747  << " failed to open in historical storage";
1748  shard->removeOnDestroy();
1749  shard.reset();
1750  }
1751  }
1752  else
1753  {
1754  JLOG(j_.warn())
1755  << "can't find shard to move to historical path";
1756  }
1757  };
1758 
1759  // See if either of the recent shards needs to be updated
1760  bool const curNotSynched =
1761  latestShardIndex_ && *latestShardIndex_ != latestShardIndex;
1762  bool const prevNotSynched = secondLatestShardIndex_ &&
1763  *secondLatestShardIndex_ != latestShardIndex - 1;
1764 
1765  // A new shard has been published. Move outdated
1766  // shards to historical storage as needed
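  // For example, when the boundary advances by one shard: the former
  // second-latest index is released to historical storage, and the former
  // latest becomes the second-latest if it directly precedes the new
  // latest index; otherwise it is moved to historical storage as well.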
1767  if (curNotSynched || prevNotSynched)
1768  {
1769  if (prev)
1770  {
1771  // Move the formerly second latest shard to historical storage
1772  if (keepShard(*prev) && separateHistoricalPath)
1773  {
1774  moveShard(*prev);
1775  }
1776 
1777  prev = boost::none;
1778  }
1779 
1780  if (cur)
1781  {
1782  // The formerly latest shard is now the second latest
1783  if (cur == latestShardIndex - 1)
1784  {
1785  prev = cur;
1786  }
1787 
1788  // The formerly latest shard is no longer a 'recent' shard
1789  else
1790  {
1791  // Move the formerly latest shard to historical storage
1792  if (keepShard(*cur) && separateHistoricalPath)
1793  {
1794  moveShard(*cur);
1795  }
1796  }
1797 
1798  cur = boost::none;
1799  }
1800  }
1801  }
1802 }
1803 
1804 auto
1805 DatabaseShardImp::prepareForNewShard(
1806  std::uint32_t shardIndex,
1807  std::uint32_t numHistoricalShards,
1808  std::lock_guard<std::mutex> const& lock) -> boost::optional<PathDesignation>
1809 {
1810  // Any shard earlier than the two most recent shards is a historical shard
1811  auto const boundaryIndex{shardBoundaryIndex()};
1812  auto const isHistoricalShard = shardIndex < boundaryIndex;
1813 
1814  auto const designation = isHistoricalShard && !historicalPaths_.empty()
1815  ? PathDesignation::historical
1816  : PathDesignation::none;
1817 
1818  // Check shard count and available storage space
1819  if (isHistoricalShard && numHistoricalShards >= maxHistoricalShards_)
1820  {
1821  JLOG(j_.error()) << "maximum number of historical shards reached";
1822  canAdd_ = false;
1823  return boost::none;
1824  }
1825  if (!sufficientStorage(1, designation, lock))
1826  {
1827  JLOG(j_.error()) << "insufficient storage space available";
1828  canAdd_ = false;
1829  return boost::none;
1830  }
1831 
1832  return designation;
1833 }
1834 
1835 boost::filesystem::path
1836 DatabaseShardImp::chooseHistoricalPath(std::lock_guard<std::mutex> const&) const
1837 {
1838  // If not configured with separate historical paths,
1839  // use the main path (dir_) by default.
1840  if (historicalPaths_.empty())
1841  return dir_;
1842 
1843  boost::filesystem::path historicalShardPath;
1844  std::vector<boost::filesystem::path> potentialPaths;
1845 
1846  for (boost::filesystem::path const& path : historicalPaths_)
1847  {
1848  if (boost::filesystem::space(path).available >= avgShardFileSz_)
1849  potentialPaths.push_back(path);
1850  }
1851 
1852  if (potentialPaths.empty())
1853  {
1854  JLOG(j_.error()) << "failed to select a historical shard path";
1855  return "";
1856  }
1857 
1858  std::sample(
1859  potentialPaths.begin(),
1860  potentialPaths.end(),
1861  &historicalShardPath,
1862  1,
1863  default_prng());
1864 
1865  return historicalShardPath;
1866 }
1867 
1868 bool
1869 DatabaseShardImp::checkHistoricalPaths() const
1870 {
1871 #if BOOST_OS_LINUX
1872  // Each historical shard path must correspond
1873  // to a directory on a distinct device or file system.
1874  // Currently, this constraint is enforced only on Linux.
1875  std::unordered_map<unsigned long, std::vector<std::string>>
1876  filesystemIDs;
1877 
1878  for (auto const& path : historicalPaths_)
1879  {
1880  struct statvfs buffer;
1881  if (statvfs(path.c_str(), &buffer))
1882  {
1883  JLOG(j_.error())
1884  << "failed to acquire stats for 'historical_shard_path': "
1885  << path;
1886  return false;
1887  }
1888 
1889  filesystemIDs[buffer.f_fsid].push_back(path.string());
1890  }
1891 
1892  bool ret = true;
1893  for (auto const& entry : filesystemIDs)
1894  {
1895  // Check to see if any of the paths are stored on the same file system
1896  if (entry.second.size() > 1)
1897  {
1898  // Two or more historical storage paths
1899  // correspond to the same file system.
1900  JLOG(j_.error())
1901  << "The following paths correspond to the same filesystem: "
1902  << boost::algorithm::join(entry.second, ", ")
1903  << ". Each configured historical storage path should"
1904  " be on a unique device or filesystem.";
1905 
1906  ret = false;
1907  }
1908  }
1909 
1910  return ret;
1911 
1912 #else
1913  // The requirement that each historical storage path
1914  // corresponds to a distinct device or file system is
1915  // enforced only on Linux, so on other platforms
1916  // keep track of the available capacities for each
1917  // path. Issue a warning if we suspect any of the paths
1918  // may violate this requirement.
1919 
1920  // Map byte counts to each path that shares that byte count.
1921  std::unordered_map<std::uintmax_t, std::vector<std::string>>
1922  uniqueCapacities(historicalPaths_.size());
1923 
1924  for (auto const& path : historicalPaths_)
1925  uniqueCapacities[boost::filesystem::space(path).available].push_back(
1926  path.string());
1927 
1928  for (auto const& entry : uniqueCapacities)
1929  {
1930  // Check to see if any paths have the same amount of available bytes.
1931  if (entry.second.size() > 1)
1932  {
1933  // Two or more historical storage paths may
1934  // correspond to the same device or file system.
1935  JLOG(j_.warn())
1936  << "Each of the following paths have " << entry.first
1937  << " bytes free, and may be located on the same device"
1938  " or file system: "
1939  << boost::algorithm::join(entry.second, ", ")
1940  << ". Each configured historical storage path should"
1941  " be on a unique device or file system.";
1942  }
1943  }
1944 #endif
1945 
1946  return true;
1947 }
1948 
1949 //------------------------------------------------------------------------------
1950 
1951 std::unique_ptr<DatabaseShard>
1952 make_ShardStore(
1953  Application& app,
1954  Stoppable& parent,
1955  Scheduler& scheduler,
1956  int readThreads,
1957  beast::Journal j)
1958 {
1959  // The shard store is optional. Future changes will require it.
1960  Section const& section{
1961  app.config().section(ConfigSection::shardDatabase())};
1962  if (section.empty())
1963  return nullptr;
1964 
1965  return std::make_unique<DatabaseShardImp>(
1966  app, parent, "ShardStore", scheduler, readThreads, j);
1967 }
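// A minimal usage sketch (hypothetical wiring; the real call site lives in
// the application's setup code, and the identifiers below are illustrative):
//
//   auto shardStore = NodeStore::make_ShardStore(
//       app, rootStoppable, scheduler, 4, app.journal("ShardStore"));
//   if (shardStore && !shardStore->init())
//       /* reject the [shard_db] configuration */;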
1968 
1969 } // namespace NodeStore
1970 } // namespace ripple
Definition: DatabaseShardImp.cpp:1507
ripple::NodeStore::DatabaseShardImp::init_
bool init_
Definition: DatabaseShardImp.h:179
ripple::NodeStore::DatabaseShardImp::getCompleteShards
std::string getCompleteShards() override
Query which complete shards are stored.
Definition: DatabaseShardImp.cpp:678
ripple::NodeStore::DatabaseShardImp::seqToShardIndex
std::uint32_t seqToShardIndex(std::uint32_t ledgerSeq) const override
Calculates the shard index for a given ledger sequence.
Definition: DatabaseShardImp.h:92
ripple::Application::getLedgerMaster
virtual LedgerMaster & getLedgerMaster()=0
ripple::PublicKey
A public key.
Definition: PublicKey.h:59
ripple::Config
Definition: Config.h:69
std::ofstream
STL class.
ripple::Application::config
virtual Config & config()=0
ripple::NodeStore::DatabaseShardImp::dir_
boost::filesystem::path dir_
Definition: DatabaseShardImp.h:197
ripple::Config::standalone
bool standalone() const
Definition: Config.h:249
ripple::NodeStore::DatabaseShardImp::removeFailedShard
void removeFailedShard(std::shared_ptr< Shard > &shard)
Definition: DatabaseShardImp.cpp:1595
ripple::Application::nodeIdentity
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
ripple::Stoppable::areChildrenStopped
bool areChildrenStopped() const
Returns true if all children have stopped.
Definition: Stoppable.cpp:66
ripple::NodeStore::DatabaseShard
A collection of historical shards.
Definition: DatabaseShard.h:37
std::to_string
T to_string(T... args)
ripple::NodeStore::DatabaseShardImp::importShard
bool importShard(std::uint32_t shardIndex, boost::filesystem::path const &srcDir) override
Import a shard into the shard database.
Definition: DatabaseShardImp.cpp:432
ripple::default_prng
beast::xor_shift_engine & default_prng()
Return the default random engine.
Definition: ripple/basics/random.h:65
ripple::NodeStore::DatabaseShardImp::store
void store(NodeObjectType type, Blob &&data, uint256 const &hash, std::uint32_t ledgerSeq) override
Store the object.
Definition: DatabaseShardImp.cpp:987
ripple::NodeStore::DatabaseShardImp::PathDesignation::none
@ none
beast::Journal::error
Stream error() const
Definition: Journal.h:333
ripple::NodeStore::DatabaseShardImp::checkHistoricalPaths
bool checkHistoricalPaths() const
Definition: DatabaseShardImp.cpp:1869
std::set::erase
T erase(T... args)
ripple::NodeStore::DatabaseShardImp::initConfig
bool initConfig(std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:1170
ripple::ConfigSection
Definition: ConfigSections.h:28
ripple::NodeStore::DatabaseShardImp::latestShardIndex_
boost::optional< std::uint32_t > latestShardIndex_
Definition: DatabaseShardImp.h:241
beast::Journal
A generic endpoint for log messages.
Definition: Journal.h:58
ripple::NodeStore::seqToShardIndex
constexpr std::uint32_t seqToShardIndex(std::uint32_t ledgerSeq, std::uint32_t ledgersPerShard=DatabaseShard::ledgersPerShardDefault)
Definition: DatabaseShard.h:183
std::uint32_t
ripple::NodeStore::Database::earliestLedgerSeq
std::uint32_t earliestLedgerSeq() const
Definition: Database.h:241
ripple::NodeStore::DatabaseShardImp::setFileStats
void setFileStats()
Definition: DatabaseShardImp.cpp:1431
ripple::NodeStore::DatabaseShardImp::acquireIndex_
std::uint32_t acquireIndex_
Definition: DatabaseShardImp.h:194
ripple::Overlay::foreach
void foreach(Function f) const
Visit every active peer.
Definition: Overlay.h:178
ripple::NodeStore::Scheduler
Scheduling for asynchronous backend activity.
Definition: ripple/nodestore/Scheduler.h:61
ripple::NodeStore::Database::fetchNodeObject
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq=0, FetchType fetchType=FetchType::synchronous)
Fetch a node object.
Definition: Database.cpp:158
std::transform
T transform(T... args)
ripple::NodeStore::Database::storeStats
void storeStats(std::uint64_t count, std::uint64_t sz)
Definition: Database.h:258
ripple::NodeStore::DatabaseShardImp::preparedIndexes_
std::set< std::uint32_t > preparedIndexes_
Definition: DatabaseShardImp.h:191
ripple::NodeStore::DatabaseShardImp::init
bool init() override
Initialize the database.
Definition: DatabaseShardImp.cpp:67
ripple::NodeStore::Shard::final
static constexpr State final
Definition: Shard.h:64
std::weak_ptr
STL class.
ripple::Serializer
Definition: Serializer.h:39
ripple::NodeStore::Database::stopReadThreads
void stopReadThreads()
Definition: Database.cpp:93
ripple::NodeStore::DatabaseShardImp::historicalPaths_
std::vector< boost::filesystem::path > historicalPaths_
Definition: DatabaseShardImp.h:212
ripple::NodeStore::DatabaseShardImp::parent_
Stoppable & parent_
Definition: DatabaseShardImp.h:177
ripple::NodeStore::DatabaseShardImp::findAcquireIndex
boost::optional< std::uint32_t > findAcquireIndex(std::uint32_t validLedgerSeq, std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:1277
ripple::NodeStore::DatabaseShardImp::importMarker_
static constexpr auto importMarker_
Definition: DatabaseShardImp.h:232
std::vector::emplace_back
T emplace_back(T... args)
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::Serializer::addBitString
int addBitString(base_uint< Bits, Tag > const &v)
Definition: Serializer.h:97
ripple::Application::getNodeStore
virtual NodeStore::Database & getNodeStore()=0
ripple::NodeStore::DatabaseShardImp::import
void import(Database &source) override
Import the application local node store.
Definition: DatabaseShardImp.cpp:734
ripple::NodeStore::DatabaseShardImp::maxHistoricalShards_
std::uint32_t maxHistoricalShards_
Definition: DatabaseShardImp.h:209
ripple::NodeStore::Database::asyncFetch
virtual bool asyncFetch(uint256 const &hash, std::uint32_t ledgerSeq, std::shared_ptr< NodeObject > &nodeObject)=0
Fetch an object without waiting.
ripple::Application::getShardFamily
virtual Family * getShardFamily()=0
ripple::NodeStore::Database::j_
const beast::Journal j_
Definition: Database.h:247
ripple::NodeStore::DatabaseShardImp::fetchLedger
std::shared_ptr< Ledger > fetchLedger(uint256 const &hash, std::uint32_t ledgerSeq) override
Fetch a ledger from the shard store.
Definition: DatabaseShardImp.cpp:538
std::vector::begin
T begin(T... args)
std
STL namespace.
ripple::NodeStore::DatabaseShardImp::numHistoricalShards
std::uint32_t numHistoricalShards(std::lock_guard< std::mutex > const &lock) const
Definition: DatabaseShardImp.cpp:1641
ripple::LedgerMaster::getCurrentLedgerIndex
LedgerIndex getCurrentLedgerIndex()
Definition: LedgerMaster.cpp:206
ripple::NodeStore::DatabaseShardImp::relocateOutdatedShards
void relocateOutdatedShards(std::lock_guard< std::mutex > const &lock)
Definition: DatabaseShardImp.cpp:1652
ripple::NodeStore::DatabaseShardImp::onChildrenStopped
void onChildrenStopped() override
Override called when all children have stopped.
Definition: DatabaseShardImp.cpp:700
ripple::Application::overlay
virtual Overlay & overlay()=0
ripple::NodeStore::DatabaseShardImp::shardBoundaryIndex
std::uint32_t shardBoundaryIndex() const
Definition: DatabaseShardImp.cpp:1625
std::count_if
T count_if(T... args)
std::vector::empty
T empty(T... args)
ripple::NodeStore::DatabaseShardImp::prepareShards
bool prepareShards(std::vector< std::uint32_t > const &shardIndexes) override
Prepare one or more shard indexes to be imported into the database.
Definition: DatabaseShardImp.cpp:296
std::unique
T unique(T... args)
beast::Journal::debug
Stream debug() const
Definition: Journal.h:315
ripple::getHashesByIndex
bool getHashesByIndex(std::uint32_t ledgerIndex, uint256 &ledgerHash, uint256 &parentHash, Application &app)
Definition: Ledger.cpp:1289
ripple::NodeStore::DatabaseShardImp::setStoredInShard
bool setStoredInShard(std::shared_ptr< Shard > &shard, std::shared_ptr< Ledger const > const &ledger)
Definition: DatabaseShardImp.cpp:1562
ripple::NodeStore::DatabaseShardImp::canAdd_
bool canAdd_
Definition: DatabaseShardImp.h:200
ripple::Serializer::add32
int add32(std::uint32_t i)
Definition: Serializer.cpp:38
std::vector::end
T end(T... args)
ripple::NodeStore::Database::scheduler_
Scheduler & scheduler_
Definition: Database.h:248
ripple::RangeSet
boost::icl::interval_set< T, std::less, ClosedInterval< T > > RangeSet
A set of closed intervals over the domain T.
Definition: RangeSet.h:69
std::max
T max(T... args)
ripple::NodeStore::DatabaseShardImp::shards_
std::unordered_map< std::uint32_t, std::shared_ptr< Shard > > shards_
Definition: DatabaseShardImp.h:188
ripple::NodeStore::DatabaseShardImp::DatabaseShardImp
DatabaseShardImp()=delete
std::unique_ptr
STL class.
std::unordered_map
STL class.
ripple::NodeStore::DatabaseShardImp::prepareForNewShard
boost::optional< PathDesignation > prepareForNewShard(std::uint32_t shardIndex, std::uint32_t numHistoricalShards, std::lock_guard< std::mutex > const &lock)
Definition: DatabaseShardImp.cpp:1805
ripple::ConfigSection::nodeDatabase
static std::string nodeDatabase()
Definition: ConfigSections.h:33
std::exception::what
T what(T... args)
ripple::NodeStore::DatabaseShardImp::onStop
void onStop() override
Override called when the stop notification is issued.
Definition: DatabaseShardImp.cpp:687
ripple::NodeStore::DatabaseShardImp::ctx_
std::unique_ptr< nudb::context > ctx_
Definition: DatabaseShardImp.h:182
ripple::BasicConfig::section
Section & section(std::string const &name)
Returns the section with the given name.
Definition: BasicConfig.cpp:138
ripple::Stoppable::isStopping
bool isStopping() const
Returns true if the stoppable should stop.
Definition: Stoppable.cpp:54
ripple::NodeStore::DatabaseShardImp::backendName_
std::string backendName_
Definition: DatabaseShardImp.h:206