rippled
DatabaseShardImp.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2017 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/app/ledger/InboundLedgers.h>
21 #include <ripple/app/ledger/LedgerMaster.h>
22 #include <ripple/app/misc/NetworkOPs.h>
23 #include <ripple/basics/ByteUtilities.h>
24 #include <ripple/basics/chrono.h>
25 #include <ripple/basics/random.h>
26 #include <ripple/core/ConfigSections.h>
27 #include <ripple/nodestore/DummyScheduler.h>
28 #include <ripple/nodestore/impl/DatabaseShardImp.h>
29 #include <ripple/overlay/Overlay.h>
30 #include <ripple/overlay/predicates.h>
31 #include <ripple/protocol/HashPrefix.h>
32 
33 #include <boost/algorithm/string/predicate.hpp>
34 
35 #if BOOST_OS_LINUX
36 #include <sys/statvfs.h>
37 #endif
38 
39 namespace ripple {
40 namespace NodeStore {
41 
42 DatabaseShardImp::DatabaseShardImp(
43  Application& app,
44  Stoppable& parent,
45  std::string const& name,
46  Scheduler& scheduler,
47  int readThreads,
48  beast::Journal j)
49  : DatabaseShard(
50  name,
51  parent,
52  scheduler,
53  readThreads,
54  app.config().section(ConfigSection::shardDatabase()),
55  j)
56  , app_(app)
57  , parent_(parent)
58  , taskQueue_(std::make_unique<TaskQueue>(*this))
59  , earliestShardIndex_(seqToShardIndex(earliestLedgerSeq()))
60  , avgShardFileSz_(ledgersPerShard_ * kilobytes(192ull))
61  , openFinalLimit_(
62  app.config().getValueFor(SizedItem::openFinalLimit, boost::none))
63 {
64 }
65 
66 bool
67 DatabaseShardImp::init()
68 {
69  {
70  std::lock_guard lock(mutex_);
71  if (init_)
72  {
73  JLOG(j_.error()) << "already initialized";
74  return false;
75  }
76 
77  if (!initConfig(lock))
78  {
79  JLOG(j_.error()) << "invalid configuration file settings";
80  return false;
81  }
82 
83  try
84  {
85  using namespace boost::filesystem;
86 
87  // Consolidate the main storage path and all historical paths
88  std::vector<path> paths{dir_};
89  paths.insert(
90  paths.end(), historicalPaths_.begin(), historicalPaths_.end());
91 
92  for (auto const& path : paths)
93  {
94  if (exists(path))
95  {
96  if (!is_directory(path))
97  {
98  JLOG(j_.error()) << path << " must be a directory";
99  return false;
100  }
101  }
102  else if (!create_directories(path))
103  {
104  JLOG(j_.error())
105  << "failed to create path: " + path.string();
106  return false;
107  }
108  }
109 
110  if (!historicalPaths_.empty())
111  {
112  // Check historical paths for duplicated file systems
113  if (!checkHistoricalPaths())
114  return false;
115  }
116 
117  ctx_ = std::make_unique<nudb::context>();
118  ctx_->start();
119 
120  // Find shards
121  std::uint32_t openFinals{0};
122  for (auto const& path : paths)
123  {
124  for (auto const& it : directory_iterator(path))
125  {
126  // Ignore files
127  if (!is_directory(it))
128  continue;
129 
130  // Ignore nonnumerical directory names
131  auto const shardDir{it.path()};
132  auto dirName{shardDir.stem().string()};
133  if (!std::all_of(
134  dirName.begin(), dirName.end(), [](auto c) {
135  return ::isdigit(static_cast<unsigned char>(c));
136  }))
137  {
138  continue;
139  }
140 
141  // Ignore values below the earliest shard index
142  auto const shardIndex{std::stoul(dirName)};
143  if (shardIndex < earliestShardIndex())
144  {
145  JLOG(j_.debug())
146  << "shard " << shardIndex
147  << " ignored, comes before earliest shard index "
148  << earliestShardIndex();
149  continue;
150  }
151 
152  // Check if a previous import failed
153  if (is_regular_file(shardDir / importMarker_))
154  {
155  JLOG(j_.warn())
156  << "shard " << shardIndex
157  << " previously failed import, removing";
158  remove_all(shardDir);
159  continue;
160  }
161 
162  auto shard{std::make_shared<Shard>(
163  app_, *this, shardIndex, shardDir.parent_path(), j_)};
164  if (!shard->init(scheduler_, *ctx_))
165  {
166  // Remove corrupted or legacy shard
167  shard->removeOnDestroy();
168  JLOG(j_.warn())
169  << "shard " << shardIndex << " removed, "
170  << (shard->isLegacy() ? "legacy" : "corrupted")
171  << " shard";
172  continue;
173  }
174 
175  switch (shard->getState())
176  {
177  case Shard::final:
178  if (++openFinals > openFinalLimit_)
179  shard->tryClose();
180  shards_.emplace(shardIndex, std::move(shard));
181  break;
182 
183  case Shard::complete:
184  finalizeShard(
185  shards_.emplace(shardIndex, std::move(shard))
186  .first->second,
187  true,
188  boost::none);
189  break;
190 
191  case Shard::acquire:
192  if (acquireIndex_ != 0)
193  {
194  JLOG(j_.error())
195  << "more than one shard being acquired";
196  return false;
197  }
198 
199  shards_.emplace(shardIndex, std::move(shard));
200  acquireIndex_ = shardIndex;
201  break;
202 
203  default:
204  JLOG(j_.error())
205  << "shard " << shardIndex << " invalid state";
206  return false;
207  }
208  }
209  }
210  }
211  catch (std::exception const& e)
212  {
213  JLOG(j_.fatal()) << "Exception caught in function " << __func__
214  << ". Error: " << e.what();
215  return false;
216  }
217 
218  updateStatus(lock);
219  setParent(parent_);
220  init_ = true;
221  }
222 
223  setFileStats();
224  return true;
225 }
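A note on what init() expects on disk: every shard occupies a subdirectory named for its numeric index, directly under the main shard path or one of the historical paths; anything else is skipped, and a directory still carrying the import marker file (importMarker_) is treated as a failed import and deleted. A hypothetical layout (the root path is illustrative, not mandated by the code):

    /var/lib/rippled/shards/      main [shard_db] path
        2/                        a finalized shard (NuDB backend files inside)
        3/                        the shard currently being acquired
        4/                        holds a leftover import marker, so a prior
                                  import failed; removed on the next startup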
226 
227 boost::optional<std::uint32_t>
228 DatabaseShardImp::prepareLedger(std::uint32_t validLedgerSeq)
229 {
230  boost::optional<std::uint32_t> shardIndex;
231 
232  {
233  std::lock_guard lock(mutex_);
234  assert(init_);
235 
236  if (acquireIndex_ != 0)
237  {
238  if (auto const it{shards_.find(acquireIndex_)}; it != shards_.end())
239  return it->second->prepare();
240 
241  // Should never get here
242  assert(false);
243  return boost::none;
244  }
245 
246  if (!canAdd_)
247  return boost::none;
248 
249  shardIndex = findAcquireIndex(validLedgerSeq, lock);
250  }
251 
252  if (!shardIndex)
253  {
254  JLOG(j_.debug()) << "no new shards to add";
255  {
256  std::lock_guard lock(mutex_);
257  canAdd_ = false;
258  }
259  return boost::none;
260  }
261 
262  auto const pathDesignation = [this, shardIndex = *shardIndex]() {
263  std::lock_guard lock(mutex_);
264  return prepareForNewShard(shardIndex, numHistoricalShards(lock), lock);
265  }();
266 
267  if (!pathDesignation)
268  return boost::none;
269 
270  auto const needsHistoricalPath =
271  *pathDesignation == PathDesignation::historical;
272 
273  auto shard = [this, shardIndex, needsHistoricalPath] {
274  std::lock_guard lock(mutex_);
275  return std::make_unique<Shard>(
276  app_,
277  *this,
278  *shardIndex,
279  (needsHistoricalPath ? chooseHistoricalPath(lock) : ""),
280  j_);
281  }();
282 
283  if (!shard->init(scheduler_, *ctx_))
284  return boost::none;
285 
286  auto const ledgerSeq{shard->prepare()};
287  {
288  std::lock_guard lock(mutex_);
289  shards_.emplace(*shardIndex, std::move(shard));
290  acquireIndex_ = *shardIndex;
291  }
292  return ledgerSeq;
293 }
294 
295 bool
296 DatabaseShardImp::prepareShards(std::vector<std::uint32_t> const& shardIndexes)
297 {
298  auto fail = [j = j_, &shardIndexes](
299  std::string const& msg,
300  boost::optional<std::uint32_t> shardIndex = boost::none) {
301  auto multipleIndexPrequel = [&shardIndexes] {
302  std::vector<std::string> indexesAsString(shardIndexes.size());
303  std::transform(
304  shardIndexes.begin(),
305  shardIndexes.end(),
306  indexesAsString.begin(),
307  [](uint32_t const index) { return std::to_string(index); });
308 
309  return std::string("shard") +
310  (shardIndexes.size() > 1 ? "s " : " ") +
311  boost::algorithm::join(indexesAsString, ", ");
312  };
313 
314  std::string const prequel = shardIndex
315  ? "shard " + std::to_string(*shardIndex)
316  : multipleIndexPrequel();
317 
318  JLOG(j.error()) << prequel << " " << msg;
319  return false;
320  };
321 
322  std::lock_guard lock(mutex_);
323  assert(init_);
324 
325  if (!canAdd_)
326  return fail("cannot be stored at this time");
327 
328  auto historicalShardsToPrepare = 0;
329 
330  for (auto const shardIndex : shardIndexes)
331  {
332  if (shardIndex < earliestShardIndex())
333  {
334  return fail(
335  "comes before earliest shard index " +
337  shardIndex);
338  }
339 
340  // If we are synced to the network, check if the shard index is
341  // greater or equal to the current or validated shard index.
342  auto seqCheck = [&](std::uint32_t ledgerSeq) {
343  if (ledgerSeq >= earliestLedgerSeq() &&
344  shardIndex >= seqToShardIndex(ledgerSeq))
345  {
346  return fail("invalid index", shardIndex);
347  }
348  return true;
349  };
350  if (!seqCheck(app_.getLedgerMaster().getValidLedgerIndex() + 1) ||
351  !seqCheck(app_.getLedgerMaster().getCurrentLedgerIndex()))
352  {
353  return fail("invalid index", shardIndex);
354  }
355 
356  if (shards_.find(shardIndex) != shards_.end())
357  return fail("is already stored", shardIndex);
358 
359  if (preparedIndexes_.find(shardIndex) != preparedIndexes_.end())
360  return fail("is already queued for import", shardIndex);
361 
362  // Any shard earlier than the two most recent shards
363  // is a historical shard
364  if (shardIndex < shardBoundaryIndex())
365  ++historicalShardsToPrepare;
366  }
367 
368  auto const numHistShards = numHistoricalShards(lock);
369 
370  // Check shard count and available storage space
371  if (numHistShards + historicalShardsToPrepare > maxHistoricalShards_)
372  return fail("maximum number of historical shards reached");
373 
374  if (historicalShardsToPrepare)
375  {
376  // Check available storage space for historical shards
377  if (!sufficientStorage(
378  historicalShardsToPrepare, PathDesignation::historical, lock))
379  return fail("insufficient storage space available");
380  }
381 
382  if (auto const recentShardsToPrepare =
383  shardIndexes.size() - historicalShardsToPrepare;
384  recentShardsToPrepare)
385  {
386  // Check available storage space for recent shards
387  if (!sufficientStorage(
388  recentShardsToPrepare, PathDesignation::none, lock))
389  return fail("insufficient storage space available");
390  }
391 
392  for (auto const shardIndex : shardIndexes)
393  {
394  auto const prepareSuccessful =
395  preparedIndexes_.emplace(shardIndex).second;
396 
397  (void)prepareSuccessful;
398  assert(prepareSuccessful);
399  }
400 
401  return true;
402 }
403 
404 void
405 DatabaseShardImp::removePreShard(std::uint32_t shardIndex)
406 {
407  std::lock_guard lock(mutex_);
408  assert(init_);
409 
410  preparedIndexes_.erase(shardIndex);
411 }
412 
413 std::string
414 DatabaseShardImp::getPreShards()
415 {
416  RangeSet<std::uint32_t> rs;
417  {
418  std::lock_guard lock(mutex_);
419  assert(init_);
420 
421  for (auto const& shardIndex : preparedIndexes_)
422  rs.insert(shardIndex);
423  }
424 
425  if (rs.empty())
426  return {};
427 
428  return to_string(rs);
429 };
430 
431 bool
432 DatabaseShardImp::importShard(
433  std::uint32_t shardIndex,
434  boost::filesystem::path const& srcDir)
435 {
436  auto fail = [&](std::string const& msg,
437  std::lock_guard<std::mutex> const& lock) {
438  JLOG(j_.error()) << "shard " << shardIndex << " " << msg;
439 
440  // Remove the failed import shard index so it can be retried
441  preparedIndexes_.erase(shardIndex);
442  return false;
443  };
444 
445  using namespace boost::filesystem;
446  try
447  {
448  if (!is_directory(srcDir) || is_empty(srcDir))
449  {
450  return fail(
451  "invalid source directory " + srcDir.string(),
453  }
454  }
455  catch (std::exception const& e)
456  {
457  return fail(
458  std::string(". Exception caught in function ") + __func__ +
459  ". Error: " + e.what(),
461  }
462 
463  auto const expectedHash{app_.getLedgerMaster().walkHashBySeq(
464  lastLedgerSeq(shardIndex), InboundLedger::Reason::GENERIC)};
465  if (!expectedHash)
466  return fail("expected hash not found", std::lock_guard(mutex_));
467 
468  path dstDir;
469  {
470  std::lock_guard lock(mutex_);
471  if (shards_.find(shardIndex) != shards_.end())
472  return fail("already exists", lock);
473 
474  // Check shard was prepared for import
475  if (preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
476  return fail("was not prepared for import", lock);
477 
478  auto const pathDesignation{
479  prepareForNewShard(shardIndex, numHistoricalShards(lock), lock)};
480  if (!pathDesignation)
481  return fail("failed to import", lock);
482 
483  if (*pathDesignation == PathDesignation::historical)
484  dstDir = chooseHistoricalPath(lock);
485  else
486  dstDir = dir_;
487  }
488  dstDir /= std::to_string(shardIndex);
489 
490  auto renameDir = [&](path const& src, path const& dst) {
491  try
492  {
493  rename(src, dst);
494  }
495  catch (std::exception const& e)
496  {
497  return fail(
498  std::string(". Exception caught in function ") + __func__ +
499  ". Error: " + e.what(),
501  }
502  return true;
503  };
504 
505  // Rename source directory to the shard database directory
506  if (!renameDir(srcDir, dstDir))
507  return false;
508 
509  // Create the new shard
510  auto shard{std::make_unique<Shard>(
511  app_, *this, shardIndex, dstDir.parent_path(), j_)};
512 
513  if (!shard->init(scheduler_, *ctx_) || shard->getState() != Shard::complete)
514  {
515  shard.reset();
516  renameDir(dstDir, srcDir);
517  return fail("failed to import", std::lock_guard(mutex_));
518  }
519 
520  auto const [it, inserted] = [&]() {
521  std::lock_guard lock(mutex_);
522  preparedIndexes_.erase(shardIndex);
523  return shards_.emplace(shardIndex, std::move(shard));
524  }();
525 
526  if (!inserted)
527  {
528  shard.reset();
529  renameDir(dstDir, srcDir);
530  return fail("failed to import", std::lock_guard(mutex_));
531  }
532 
533  finalizeShard(it->second, true, expectedHash);
534  return true;
535 }
536 
537 std::shared_ptr<Ledger>
538 DatabaseShardImp::fetchLedger(uint256 const& hash, std::uint32_t ledgerSeq)
539 {
540  auto const shardIndex{seqToShardIndex(ledgerSeq)};
541  {
542  std::shared_ptr<Shard> shard;
543  {
544  std::lock_guard lock(mutex_);
545  assert(init_);
546 
547  auto const it{shards_.find(shardIndex)};
548  if (it == shards_.end())
549  return nullptr;
550  shard = it->second;
551  }
552 
553  // Ledger must be stored in a final or acquiring shard
554  switch (shard->getState())
555  {
556  case Shard::final:
557  break;
558  case Shard::acquire:
559  if (shard->containsLedger(ledgerSeq))
560  break;
561  [[fallthrough]];
562  default:
563  return nullptr;
564  }
565  }
566 
567  auto const nodeObject{Database::fetchNodeObject(hash, ledgerSeq)};
568  if (!nodeObject)
569  return nullptr;
570 
571  auto fail = [&](std::string const& msg) -> std::shared_ptr<Ledger> {
572  JLOG(j_.error()) << "shard " << shardIndex << " " << msg;
573  return nullptr;
574  };
575 
576  auto ledger{std::make_shared<Ledger>(
577  deserializePrefixedHeader(makeSlice(nodeObject->getData())),
578  app_.config(),
579  *app_.getShardFamily())};
580 
581  if (ledger->info().seq != ledgerSeq)
582  {
583  return fail(
584  "encountered invalid ledger sequence " + std::to_string(ledgerSeq));
585  }
586  if (ledger->info().hash != hash)
587  {
588  return fail(
589  "encountered invalid ledger hash " + to_string(hash) +
590  " on sequence " + std::to_string(ledgerSeq));
591  }
592 
593  ledger->setFull();
594  if (!ledger->stateMap().fetchRoot(
595  SHAMapHash{ledger->info().accountHash}, nullptr))
596  {
597  return fail(
598  "is missing root STATE node on hash " + to_string(hash) +
599  " on sequence " + std::to_string(ledgerSeq));
600  }
601 
602  if (ledger->info().txHash.isNonZero())
603  {
604  if (!ledger->txMap().fetchRoot(
605  SHAMapHash{ledger->info().txHash}, nullptr))
606  {
607  return fail(
608  "is missing root TXN node on hash " + to_string(hash) +
609  " on sequence " + std::to_string(ledgerSeq));
610  }
611  }
612  return ledger;
613 }
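A minimal caller sketch for the above, assuming shardStore, hash and seq are in scope: a null return means the ledger is either absent from the store or failed one of the consistency checks (sequence, hash, or SHAMap roots), with the reason already logged by fetchLedger.

    if (auto const ledger = shardStore->fetchLedger(hash, seq))
    {
        // Safe to walk the ledger here: the header round-tripped intact
        // and both the state and transaction SHAMap roots resolved.
        auto const& info = ledger->info();
    }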
614 
615 void
616 DatabaseShardImp::setStored(std::shared_ptr<Ledger const> const& ledger)
617 {
618  auto const ledgerSeq{ledger->info().seq};
619  if (ledger->info().hash.isZero())
620  {
621  JLOG(j_.error()) << "zero ledger hash for ledger sequence "
622  << ledgerSeq;
623  return;
624  }
625  if (ledger->info().accountHash.isZero())
626  {
627  JLOG(j_.error()) << "zero account hash for ledger sequence "
628  << ledgerSeq;
629  return;
630  }
631  if (ledger->stateMap().getHash().isNonZero() &&
632  !ledger->stateMap().isValid())
633  {
634  JLOG(j_.error()) << "invalid state map for ledger sequence "
635  << ledgerSeq;
636  return;
637  }
638  if (ledger->info().txHash.isNonZero() && !ledger->txMap().isValid())
639  {
640  JLOG(j_.error()) << "invalid transaction map for ledger sequence "
641  << ledgerSeq;
642  return;
643  }
644 
645  auto const shardIndex{seqToShardIndex(ledgerSeq)};
646  std::shared_ptr<Shard> shard;
647  {
648  std::lock_guard lock(mutex_);
649  assert(init_);
650 
651  if (shardIndex != acquireIndex_)
652  {
653  JLOG(j_.trace())
654  << "shard " << shardIndex << " is not being acquired";
655  return;
656  }
657 
658  auto const it{shards_.find(shardIndex)};
659  if (it == shards_.end())
660  {
661  JLOG(j_.error())
662  << "shard " << shardIndex << " is not being acquired";
663  return;
664  }
665  shard = it->second;
666  }
667 
668  if (shard->containsLedger(ledgerSeq))
669  {
670  JLOG(j_.trace()) << "shard " << shardIndex << " ledger already stored";
671  return;
672  }
673 
674  setStoredInShard(shard, ledger);
675 }
676 
677 std::string
678 DatabaseShardImp::getCompleteShards()
679 {
680  std::lock_guard lock(mutex_);
681  assert(init_);
682 
683  return status_;
684 }
685 
686 void
687 DatabaseShardImp::onStop()
688 {
689  // Stop read threads in base before data members are destroyed
690  stopReadThreads();
691 
692  std::lock_guard lock(mutex_);
693 
694  // Notify shards to stop
695  for (auto const& e : shards_)
696  e.second->stop();
697 }
698 
699 void
700 DatabaseShardImp::onChildrenStopped()
701 {
702  std::vector<std::weak_ptr<Shard>> shards;
703  {
704  std::lock_guard lock(mutex_);
705 
706  shards.reserve(shards_.size());
707  for (auto const& e : shards_)
708  shards.push_back(e.second);
709  shards_.clear();
710  }
711 
712  // All shards should be expired at this point
713  for (auto const& e : shards)
714  {
715  if (!e.expired())
716  {
717  std::string shardIndex;
718  if (auto const shard{e.lock()}; shard)
719  shardIndex = std::to_string(shard->index());
720 
721  JLOG(j_.warn()) << " shard " << shardIndex << " unexpired";
722  }
723  }
724 
725  if (areChildrenStopped())
726  stopped();
727  else
728  {
729  JLOG(j_.warn()) << " Children failed to stop";
730  }
731 }
732 
733 void
734 DatabaseShardImp::import(Database& source)
735 {
736  {
737  std::lock_guard lock(mutex_);
738  assert(init_);
739 
740  // Only the application local node store can be imported
741  if (&source != &app_.getNodeStore())
742  {
743  assert(false);
744  JLOG(j_.error()) << "invalid source database";
745  return;
746  }
747 
748  std::uint32_t earliestIndex;
749  std::uint32_t latestIndex;
750  {
751  auto loadLedger = [&](bool ascendSort =
752  true) -> boost::optional<std::uint32_t> {
753  std::shared_ptr<Ledger> ledger;
754  std::uint32_t ledgerSeq;
755  std::tie(ledger, ledgerSeq, std::ignore) = loadLedgerHelper(
756  "WHERE LedgerSeq >= " +
758  " order by LedgerSeq " + (ascendSort ? "asc" : "desc") +
759  " limit 1",
760  app_,
761  false);
762  if (!ledger || ledgerSeq == 0)
763  {
764  JLOG(j_.error()) << "no suitable ledgers were found in"
765  " the SQLite database to import";
766  return boost::none;
767  }
768  return ledgerSeq;
769  };
770 
771  // Find earliest ledger sequence stored
772  auto ledgerSeq{loadLedger()};
773  if (!ledgerSeq)
774  return;
775  earliestIndex = seqToShardIndex(*ledgerSeq);
776 
777  // Consider only complete shards
778  if (ledgerSeq != firstLedgerSeq(earliestIndex))
779  ++earliestIndex;
780 
781  // Find last ledger sequence stored
782  ledgerSeq = loadLedger(false);
783  if (!ledgerSeq)
784  return;
785  latestIndex = seqToShardIndex(*ledgerSeq);
786 
787  // Consider only complete shards
788  if (ledgerSeq != lastLedgerSeq(latestIndex))
789  --latestIndex;
790 
791  if (latestIndex < earliestIndex)
792  {
793  JLOG(j_.error()) << "no suitable ledgers were found in"
794  " the SQLite database to import";
795  return;
796  }
797  }
798 
799  auto numHistShards = this->numHistoricalShards(lock);
800 
801  // Import the shards
802  for (std::uint32_t shardIndex = earliestIndex;
803  shardIndex <= latestIndex;
804  ++shardIndex)
805  {
806  auto const pathDesignation =
807  prepareForNewShard(shardIndex, numHistShards, lock);
808 
809  if (!pathDesignation)
810  break;
811 
812  auto const needsHistoricalPath =
813  *pathDesignation == PathDesignation::historical;
814 
815  // Skip if being acquired
816  if (shardIndex == acquireIndex_)
817  {
818  JLOG(j_.debug())
819  << "shard " << shardIndex << " already being acquired";
820  continue;
821  }
822 
823  // Skip if being imported
824  if (preparedIndexes_.find(shardIndex) != preparedIndexes_.end())
825  {
826  JLOG(j_.debug())
827  << "shard " << shardIndex << " already being imported";
828  continue;
829  }
830 
831  // Skip if stored
832  if (shards_.find(shardIndex) != shards_.end())
833  {
834  JLOG(j_.debug()) << "shard " << shardIndex << " already stored";
835  continue;
836  }
837 
838  // Verify SQLite ledgers are in the node store
839  {
840  auto const firstSeq{firstLedgerSeq(shardIndex)};
841  auto const lastSeq{
842  std::max(firstSeq, lastLedgerSeq(shardIndex))};
843  auto const numLedgers{
844  shardIndex == earliestShardIndex() ? lastSeq - firstSeq + 1
845  : ledgersPerShard_};
846  auto ledgerHashes{getHashesByIndex(firstSeq, lastSeq, app_)};
847  if (ledgerHashes.size() != numLedgers)
848  continue;
849 
850  bool valid{true};
851  for (std::uint32_t n = firstSeq; n <= lastSeq; n += 256)
852  {
853  if (!source.fetchNodeObject(ledgerHashes[n].first, n))
854  {
855  JLOG(j_.warn()) << "SQLite ledger sequence " << n
856  << " mismatches node store";
857  valid = false;
858  break;
859  }
860  }
861  if (!valid)
862  continue;
863  }
864 
865  auto const path =
866  needsHistoricalPath ? chooseHistoricalPath(lock) : dir_;
867 
868  // Create the new shard
869  auto shard{
870  std::make_unique<Shard>(app_, *this, shardIndex, path, j_)};
871  if (!shard->init(scheduler_, *ctx_))
872  continue;
873 
874  // Create a marker file to signify an import in progress
875  auto const shardDir{path / std::to_string(shardIndex)};
876  auto const markerFile{shardDir / importMarker_};
877  {
878  std::ofstream ofs{markerFile.string()};
879  if (!ofs.is_open())
880  {
881  JLOG(j_.error()) << "shard " << shardIndex
882  << " failed to create temp marker file";
883  shard->removeOnDestroy();
884  continue;
885  }
886  ofs.close();
887  }
888 
889  // Copy the ledgers from node store
890  std::shared_ptr<Ledger> recentStored;
891  boost::optional<uint256> lastLedgerHash;
892 
893  while (auto const ledgerSeq = shard->prepare())
894  {
895  auto ledger{loadByIndex(*ledgerSeq, app_, false)};
896  if (!ledger || ledger->info().seq != ledgerSeq)
897  break;
898 
899  auto const result{shard->storeLedger(ledger, recentStored)};
900  storeStats(result.count, result.size);
901  if (result.error)
902  break;
903 
904  if (!shard->setLedgerStored(ledger))
905  break;
906 
907  if (!lastLedgerHash && ledgerSeq == lastLedgerSeq(shardIndex))
908  lastLedgerHash = ledger->info().hash;
909 
910  recentStored = std::move(ledger);
911  }
912 
913  using namespace boost::filesystem;
914  bool success{false};
915  if (lastLedgerHash && shard->getState() == Shard::complete)
916  {
917  // Store shard final key
918  Serializer s;
919  s.add32(Shard::version);
920  s.add32(firstLedgerSeq(shardIndex));
921  s.add32(lastLedgerSeq(shardIndex));
922  s.addBitString(*lastLedgerHash);
923  auto const nodeObject{NodeObject::createObject(
924  hotUNKNOWN, std::move(s.modData()), Shard::finalKey)};
925 
926  if (shard->storeNodeObject(nodeObject))
927  {
928  try
929  {
930  // The import process is complete and the
931  // marker file is no longer required
932  remove_all(markerFile);
933 
934  JLOG(j_.debug()) << "shard " << shardIndex
935  << " was successfully imported";
936  finalizeShard(
937  shards_.emplace(shardIndex, std::move(shard))
938  .first->second,
939  true,
940  boost::none);
941  success = true;
942  }
943  catch (std::exception const& e)
944  {
945  JLOG(j_.fatal()) << "shard index " << shardIndex
946  << ". Exception caught in function "
947  << __func__ << ". Error: " << e.what();
948  }
949  }
950  }
951 
952  if (!success)
953  {
954  JLOG(j_.error())
955  << "shard " << shardIndex << " failed to import";
956  shard->removeOnDestroy();
957  }
958  }
959 
960  updateStatus(lock);
961  }
962 
963  setFileStats();
964 }
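For reference, the final-key record assembled near the end of the loop above is a fixed 44-byte value stored under Shard::finalKey: a version field (4 bytes, via the s.add32(Shard::version) call), the shard's first and last ledger sequences (4 bytes each), and the hash of its last ledger (32 bytes). Finalization can later read this record back to learn which ledger hash a complete shard must end on.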
965 
966 std::int32_t
967 DatabaseShardImp::getWriteLoad() const
968 {
969  std::shared_ptr<Shard> shard;
970  {
971  std::lock_guard lock(mutex_);
972  assert(init_);
973 
974  auto const it{shards_.find(acquireIndex_)};
975  if (it == shards_.end())
976  return 0;
977  shard = it->second;
978  }
979 
980  return shard->getWriteLoad();
981 }
982 
983 void
984 DatabaseShardImp::store(
985  NodeObjectType type,
986  Blob&& data,
987  uint256 const& hash,
988  std::uint32_t ledgerSeq)
989 {
990  auto const shardIndex{seqToShardIndex(ledgerSeq)};
991  std::shared_ptr<Shard> shard;
992  {
993  std::lock_guard lock(mutex_);
994  if (shardIndex != acquireIndex_)
995  {
996  JLOG(j_.trace())
997  << "shard " << shardIndex << " is not being acquired";
998  return;
999  }
1000 
1001  auto const it{shards_.find(shardIndex)};
1002  if (it == shards_.end())
1003  {
1004  JLOG(j_.error())
1005  << "shard " << shardIndex << " is not being acquired";
1006  return;
1007  }
1008  shard = it->second;
1009  }
1010 
1011  auto const nodeObject{
1012  NodeObject::createObject(type, std::move(data), hash)};
1013  if (shard->storeNodeObject(nodeObject))
1014  storeStats(1, nodeObject->getData().size());
1015 }
1016 
1017 bool
1018 DatabaseShardImp::asyncFetch(
1019  uint256 const& hash,
1020  std::uint32_t ledgerSeq,
1021  std::shared_ptr<NodeObject>& nodeObject)
1022 {
1023  std::shared_ptr<Shard> shard;
1024  {
1025  std::lock_guard lock(mutex_);
1026  assert(init_);
1027 
1028  auto const it{shards_.find(acquireIndex_)};
1029  if (it == shards_.end())
1030  return false;
1031  shard = it->second;
1032  }
1033 
1034  if (shard->fetchNodeObjectFromCache(hash, nodeObject))
1035  return true;
1036 
1037  // Otherwise post a read
1038  Database::asyncFetch(hash, ledgerSeq);
1039  return false;
1040 }
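A hedged usage sketch of this two-phase fetch, with db, hash, seq and use() assumed to be in scope: a cache hit completes synchronously, while anything else is queued for the read threads and the caller comes back for the result once the read completes.

    std::shared_ptr<NodeObject> obj;
    if (db.asyncFetch(hash, seq, obj))
        use(obj);   // served from the acquire shard's cache
    // otherwise a read was posted via Database::asyncFetch(hash, seq)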
1041 
1042 bool
1043 DatabaseShardImp::storeLedger(std::shared_ptr<Ledger const> const& srcLedger)
1044 {
1045  auto const ledgerSeq{srcLedger->info().seq};
1046  auto const shardIndex{seqToShardIndex(ledgerSeq)};
1047  std::shared_ptr<Shard> shard;
1048  {
1049  std::lock_guard lock(mutex_);
1050  assert(init_);
1051 
1052  if (shardIndex != acquireIndex_)
1053  {
1054  JLOG(j_.trace())
1055  << "shard " << shardIndex << " is not being acquired";
1056  return false;
1057  }
1058 
1059  auto const it{shards_.find(shardIndex)};
1060  if (it == shards_.end())
1061  {
1062  JLOG(j_.error())
1063  << "shard " << shardIndex << " is not being acquired";
1064  return false;
1065  }
1066  shard = it->second;
1067  }
1068 
1069  auto const result{shard->storeLedger(srcLedger, nullptr)};
1070  storeStats(result.count, result.size);
1071  if (result.error || result.count == 0 || result.size == 0)
1072  return false;
1073 
1074  return setStoredInShard(shard, srcLedger);
1075 }
1076 
1077 int
1078 DatabaseShardImp::getDesiredAsyncReadCount(std::uint32_t ledgerSeq)
1079 {
1080  auto const shardIndex{seqToShardIndex(ledgerSeq)};
1081  std::shared_ptr<Shard> shard;
1082  {
1083  std::lock_guard lock(mutex_);
1084  assert(init_);
1085 
1086  auto const it{shards_.find(shardIndex)};
1087  if (it == shards_.end())
1088  return 0;
1089  shard = it->second;
1090  }
1091 
1092  return shard->getDesiredAsyncReadCount();
1093 }
1094 
1095 float
1096 DatabaseShardImp::getCacheHitRate()
1097 {
1098  std::shared_ptr<Shard> shard;
1099  {
1100  std::lock_guard lock(mutex_);
1101  assert(init_);
1102 
1103  auto const it{shards_.find(acquireIndex_)};
1104  if (it == shards_.end())
1105  return 0;
1106  shard = it->second;
1107  }
1108 
1109  return shard->getCacheHitRate();
1110 }
1111 
1112 void
1113 DatabaseShardImp::sweep()
1114 {
1115  std::vector<std::weak_ptr<Shard>> shards;
1116  {
1117  std::lock_guard lock(mutex_);
1118  assert(init_);
1119 
1120  shards.reserve(shards_.size());
1121  for (auto const& e : shards_)
1122  shards.push_back(e.second);
1123  }
1124 
1125  std::vector<std::shared_ptr<Shard>> openFinals;
1126  openFinals.reserve(openFinalLimit_);
1127 
1128  for (auto const& e : shards)
1129  {
1130  if (auto const shard{e.lock()}; shard && shard->isOpen())
1131  {
1132  shard->sweep();
1133 
1134  if (shard->getState() == Shard::final)
1135  openFinals.emplace_back(std::move(shard));
1136  }
1137  }
1138 
1139  if (openFinals.size() > openFinalLimit_)
1140  {
1141  JLOG(j_.trace()) << "Open shards exceed configured limit of "
1142  << openFinalLimit_ << " by "
1143  << (openFinals.size() - openFinalLimit_);
1144 
1145  // Try to close enough shards to be within the limit.
1146  // Sort ascending on last use so the oldest are removed first.
1147  std::sort(
1148  openFinals.begin(),
1149  openFinals.end(),
1150  [&](std::shared_ptr<Shard> const& lhsShard,
1151  std::shared_ptr<Shard> const& rhsShard) {
1152  return lhsShard->getLastUse() < rhsShard->getLastUse();
1153  });
1154 
1155  for (auto it{openFinals.cbegin()};
1156  it != openFinals.cend() && openFinals.size() > openFinalLimit_;)
1157  {
1158  if ((*it)->tryClose())
1159  it = openFinals.erase(it);
1160  else
1161  ++it;
1162  }
1163  }
1164 }
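As a worked example of the close-out logic: with openFinalLimit_ at 32 and 40 final shards still open after the sweep, the vector is sorted by last-use time and tryClose() is attempted on the least recently used entries until only 32 remain open; a shard that cannot be closed (for example, one still referenced elsewhere) is simply skipped and the iterator moves on.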
1165 
1166 bool
1167 DatabaseShardImp::initConfig(std::lock_guard<std::mutex> const&)
1168 {
1169  auto fail = [j = j_](std::string const& msg) {
1170  JLOG(j.error()) << "[" << ConfigSection::shardDatabase() << "] " << msg;
1171  return false;
1172  };
1173 
1174  Config const& config{app_.config()};
1175  Section const& section{config.section(ConfigSection::shardDatabase())};
1176 
1177  {
1178  // The earliest ledger sequence defaults to XRP_LEDGER_EARLIEST_SEQ.
1179  // A custom earliest ledger sequence can be set through the
1180  // configuration file using the 'earliest_seq' field under the
1181  // 'node_db' and 'shard_db' stanzas. If specified, this field must
1182  // have a value greater than zero and be equally assigned in
1183  // both stanzas.
1184 
1185  std::uint32_t shardDBEarliestSeq{0};
1186  get_if_exists<std::uint32_t>(
1187  section, "earliest_seq", shardDBEarliestSeq);
1188 
1189  std::uint32_t nodeDBEarliestSeq{0};
1190  get_if_exists<std::uint32_t>(
1191  config.section(ConfigSection::nodeDatabase()),
1192  "earliest_seq",
1193  nodeDBEarliestSeq);
1194 
1195  if (shardDBEarliestSeq != nodeDBEarliestSeq)
1196  {
1197  return fail(
1198  "and [" + ConfigSection::nodeDatabase() +
1199  "] define different 'earliest_seq' values");
1200  }
1201  }
1202 
1203  using namespace boost::filesystem;
1204  if (!get_if_exists<path>(section, "path", dir_))
1205  return fail("'path' missing");
1206 
1207  {
1208  get_if_exists(section, "max_historical_shards", maxHistoricalShards_);
1209 
1210  Section const& historicalShardPaths =
1211  config.section(SECTION_HISTORICAL_SHARD_PATHS);
1212 
1213  auto values = historicalShardPaths.values();
1214 
1215  std::sort(values.begin(), values.end());
1216  values.erase(std::unique(values.begin(), values.end()), values.end());
1217 
1218  for (auto const& s : values)
1219  {
1220  auto const dir = path(s);
1221  if (dir_ == dir)
1222  {
1223  return fail(
1224  "the 'path' cannot also be in the "
1225  "'historical_shard_path' section");
1226  }
1227 
1228  historicalPaths_.push_back(dir);
1229  }
1230  }
1231 
1232  if (section.exists("ledgers_per_shard"))
1233  {
1234  // To be set only in standalone for testing
1235  if (!config.standalone())
1236  return fail("'ledgers_per_shard' only honored in stand alone");
1237 
1238  ledgersPerShard_ = get<std::uint32_t>(section, "ledgers_per_shard");
1239  if (ledgersPerShard_ == 0 || ledgersPerShard_ % 256 != 0)
1240  return fail("'ledgers_per_shard' must be a multiple of 256");
1241 
1242  earliestShardIndex_ = seqToShardIndex(earliestLedgerSeq());
1243  avgShardFileSz_ = ledgersPerShard_ * kilobytes(192);
1244  }
1245 
1246  // NuDB is the default and only supported permanent storage backend
1247  backendName_ = get<std::string>(section, "type", "nudb");
1248  if (!boost::iequals(backendName_, "NuDB"))
1249  return fail("'type' value unsupported");
1250 
1251  return true;
1252 }
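Pulling the accepted settings together, a hypothetical configuration fragment this parser accepts (paths are illustrative, and the [historical_shard_paths] stanza name assumes that is what SECTION_HISTORICAL_SHARD_PATHS expands to):

    [shard_db]
    type=nudb
    path=/var/lib/rippled/shards
    max_historical_shards=50

    [historical_shard_paths]
    /mnt/disk1/shards
    /mnt/disk2/shards

If earliest_seq is set here, the same value must appear under [node_db], and ledgers_per_shard is honored only in standalone mode, as enforced above.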
1253 
1254 std::shared_ptr<NodeObject>
1255 DatabaseShardImp::fetchNodeObject(
1256  uint256 const& hash,
1257  std::uint32_t ledgerSeq,
1258  FetchReport& fetchReport)
1259 {
1260  auto const shardIndex{seqToShardIndex(ledgerSeq)};
1261  std::shared_ptr<Shard> shard;
1262  {
1263  std::lock_guard lock(mutex_);
1264  auto const it{shards_.find(shardIndex)};
1265  if (it == shards_.end())
1266  return nullptr;
1267  shard = it->second;
1268  }
1269 
1270  return shard->fetchNodeObject(hash, fetchReport);
1271 }
1272 
1273 boost::optional<std::uint32_t>
1274 DatabaseShardImp::findAcquireIndex(
1275  std::uint32_t validLedgerSeq,
1276  std::lock_guard<std::mutex> const&)
1277 {
1278  if (validLedgerSeq < earliestLedgerSeq())
1279  return boost::none;
1280 
1281  auto const maxShardIndex{[this, validLedgerSeq]() {
1282  auto shardIndex{seqToShardIndex(validLedgerSeq)};
1283  if (validLedgerSeq != lastLedgerSeq(shardIndex))
1284  --shardIndex;
1285  return shardIndex;
1286  }()};
1287  auto const maxNumShards{maxShardIndex - earliestShardIndex() + 1};
1288 
1289  // Check if the shard store has all shards
1290  if (shards_.size() >= maxNumShards)
1291  return boost::none;
1292 
1293  if (maxShardIndex < 1024 ||
1294  static_cast<float>(shards_.size()) / maxNumShards > 0.5f)
1295  {
1296  // Small or mostly full index space to sample
1297  // Find the available indexes and select one at random
1298  std::vector<std::uint32_t> available;
1299  available.reserve(maxNumShards - shards_.size());
1300 
1301  for (auto shardIndex = earliestShardIndex();
1302  shardIndex <= maxShardIndex;
1303  ++shardIndex)
1304  {
1305  if (shards_.find(shardIndex) == shards_.end() &&
1306  preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
1307  {
1308  available.push_back(shardIndex);
1309  }
1310  }
1311 
1312  if (available.empty())
1313  return boost::none;
1314 
1315  if (available.size() == 1)
1316  return available.front();
1317 
1318  return available[rand_int(
1319  0u, static_cast<std::uint32_t>(available.size() - 1))];
1320  }
1321 
1322  // Large, sparse index space to sample
1323  // Keep choosing indexes at random until an available one is found
1324  // chances of running more than 30 times is less than 1 in a billion
1325  for (int i = 0; i < 40; ++i)
1326  {
1327  auto const shardIndex{rand_int(earliestShardIndex(), maxShardIndex)};
1328  if (shards_.find(shardIndex) == shards_.end() &&
1329  preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
1330  {
1331  return shardIndex;
1332  }
1333  }
1334 
1335  assert(false);
1336  return boost::none;
1337 }
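The one-in-a-billion claim above checks out: this branch is taken only when at most half of the candidate index space is stored, so (ignoring the usually small set of prepared indexes) each uniform draw misses with probability at most 1/2, and thirty consecutive misses occur with probability at most (1/2)^30 = 1/1,073,741,824, roughly 9.3e-10. The loop allows 40 attempts for extra margin before falling through to the assert.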
1338 
1339 void
1340 DatabaseShardImp::finalizeShard(
1341  std::shared_ptr<Shard>& shard,
1342  bool writeSQLite,
1343  boost::optional<uint256> const& expectedHash)
1344 {
1345  taskQueue_->addTask([this,
1346  wptr = std::weak_ptr<Shard>(shard),
1347  writeSQLite,
1348  expectedHash]() {
1349  if (isStopping())
1350  return;
1351 
1352  auto shard{wptr.lock()};
1353  if (!shard)
1354  {
1355  JLOG(j_.debug()) << "Shard removed before being finalized";
1356  return;
1357  }
1358 
1359  if (!shard->finalize(writeSQLite, expectedHash))
1360  {
1361  if (isStopping())
1362  return;
1363 
1364  // Invalid or corrupt shard, remove it
1365  removeFailedShard(shard);
1366  return;
1367  }
1368 
1369  if (isStopping())
1370  return;
1371 
1372  {
1373  auto const boundaryIndex{shardBoundaryIndex()};
1374 
1375  std::lock_guard lock(mutex_);
1376  updateStatus(lock);
1377 
1378  if (shard->index() < boundaryIndex)
1379  {
1380  // This is a historical shard
1381  if (!historicalPaths_.empty() &&
1382  shard->getDir().parent_path() == dir_)
1383  {
1384  // Shard wasn't placed at a separate historical path
1385  JLOG(j_.warn()) << "shard " << shard->index()
1386  << " is not stored at a historical path";
1387  }
1388  }
1389 
1390  else
1391  {
1392  // Not a historical shard. Shift recent shards if necessary
1393  relocateOutdatedShards(lock);
1394  assert(!boundaryIndex || shard->index() - boundaryIndex <= 1);
1395 
1396  auto& recentShard = shard->index() == boundaryIndex
1397  ? secondLatestShardIndex_
1398  : latestShardIndex_;
1399 
1400  // Set the appropriate recent shard index
1401  recentShard = shard->index();
1402 
1403  if (shard->getDir().parent_path() != dir_)
1404  {
1405  JLOG(j_.warn()) << "shard " << shard->index()
1406  << " is not stored at the path";
1407  }
1408  }
1409  }
1410 
1411  setFileStats();
1412 
1413  // Update peers with new shard index
1414  if (!app_.config().standalone() &&
1415  app_.getOPs().getOperatingMode() != OperatingMode::DISCONNECTED)
1416  {
1417  protocol::TMPeerShardInfo message;
1418  PublicKey const& publicKey{app_.nodeIdentity().first};
1419  message.set_nodepubkey(publicKey.data(), publicKey.size());
1420  message.set_shardindexes(std::to_string(shard->index()));
1421  app_.overlay().foreach(send_always(std::make_shared<Message>(
1422  message, protocol::mtPEER_SHARD_INFO)));
1423  }
1424  });
1425 }
1426 
1427 void
1428 DatabaseShardImp::setFileStats()
1429 {
1430  std::vector<std::weak_ptr<Shard>> shards;
1431  {
1432  std::lock_guard lock(mutex_);
1433  if (shards_.empty())
1434  return;
1435 
1436  shards.reserve(shards_.size());
1437  for (auto const& e : shards_)
1438  shards.push_back(e.second);
1439  }
1440 
1441  std::uint64_t sumSz{0};
1442  std::uint32_t sumFd{0};
1443  std::uint32_t numShards{0};
1444  for (auto const& e : shards)
1445  {
1446  if (auto const shard{e.lock()}; shard)
1447  {
1448  auto const [sz, fd] = shard->getFileInfo();
1449  sumSz += sz;
1450  sumFd += fd;
1451  ++numShards;
1452  }
1453  }
1454 
1455  std::lock_guard lock(mutex_);
1456  fileSz_ = sumSz;
1457  fdRequired_ = sumFd;
1458  avgShardFileSz_ = (numShards == 0 ? fileSz_ : fileSz_ / numShards);
1459 
1460  if (auto const count = numHistoricalShards(lock);
1461  count >= maxHistoricalShards_)
1462  {
1463  JLOG(j_.warn()) << "maximum number of historical shards reached";
1464  canAdd_ = false;
1465  }
1466  else if (!sufficientStorage(
1467  maxHistoricalShards_ - count,
1468  PathDesignation::historical,
1469  lock))
1470  {
1471  JLOG(j_.warn())
1472  << "maximum shard store size exceeds available storage space";
1473  }
1474 }
1475 
1476 void
1477 DatabaseShardImp::updateStatus(std::lock_guard<std::mutex> const&)
1478 {
1479  if (!shards_.empty())
1480  {
1481  RangeSet<std::uint32_t> rs;
1482  for (auto const& e : shards_)
1483  if (e.second->getState() == Shard::final)
1484  rs.insert(e.second->index());
1485  status_ = to_string(rs);
1486  }
1487  else
1488  status_.clear();
1489 }
1490 
1491 bool
1492 DatabaseShardImp::sufficientStorage(
1493  std::uint32_t numShards,
1494  PathDesignation pathDesignation,
1495  std::lock_guard<std::mutex> const&) const
1496 {
1497  try
1498  {
1499  std::vector<std::uint64_t> capacities;
1500 
1501  if (pathDesignation == PathDesignation::historical &&
1502  !historicalPaths_.empty())
1503  {
1504  capacities.reserve(historicalPaths_.size());
1505 
1506  for (auto const& path : historicalPaths_)
1507  {
1508  // Get the available storage for each historical path
1509  auto const availableSpace =
1510  boost::filesystem::space(path).available;
1511 
1512  capacities.push_back(availableSpace);
1513  }
1514  }
1515  else
1516  {
1517  // Get the available storage for the main shard path
1518  capacities.push_back(boost::filesystem::space(dir_).available);
1519  }
1520 
1521  for (std::uint64_t const capacity : capacities)
1522  {
1523  // Leverage all the historical shard paths to
1524  // see if collectively they can fit the specified
1525  // number of shards. For this to work properly,
1526  // each historical path must correspond to a separate
1527  // physical device or filesystem.
1528 
1529  auto const shardCap = capacity / avgShardFileSz_;
1530  if (numShards <= shardCap)
1531  return true;
1532 
1533  numShards -= shardCap;
1534  }
1535  }
1536  catch (std::exception const& e)
1537  {
1538  JLOG(j_.fatal()) << "Exception caught in function " << __func__
1539  << ". Error: " << e.what();
1540  return false;
1541  }
1542 
1543  return false;
1544 }
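A worked example, assuming the common default of 16384 ledgers per shard so that avgShardFileSz_ = 16384 * 192 KB = 3 GiB (per the constructor's initializer): with two historical paths offering 10 GiB and 5 GiB of free space, the per-path capacities are 10/3 = 3 and 5/3 = 1 shards (integer division), so a request for up to four historical shards succeeds while a request for five fails.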
1545 
1546 bool
1547 DatabaseShardImp::setStoredInShard(
1548  std::shared_ptr<Shard>& shard,
1549  std::shared_ptr<Ledger const> const& ledger)
1550 {
1551  if (!shard->setLedgerStored(ledger))
1552  {
1553  // Invalid or corrupt shard, remove it
1554  removeFailedShard(shard);
1555  return false;
1556  }
1557 
1558  if (shard->getState() == Shard::complete)
1559  {
1560  std::lock_guard lock(mutex_);
1561  if (auto const it{shards_.find(shard->index())}; it != shards_.end())
1562  {
1563  if (shard->index() == acquireIndex_)
1564  acquireIndex_ = 0;
1565 
1566  finalizeShard(it->second, false, boost::none);
1567  }
1568  else
1569  {
1570  JLOG(j_.debug())
1571  << "shard " << shard->index() << " is no longer being acquired";
1572  }
1573  }
1574 
1575  setFileStats();
1576  return true;
1577 }
1578 
1579 void
1580 DatabaseShardImp::removeFailedShard(std::shared_ptr<Shard>& shard)
1581 {
1582  {
1583  std::lock_guard lock(mutex_);
1584 
1585  if (shard->index() == acquireIndex_)
1586  acquireIndex_ = 0;
1587 
1588  if (shard->index() == latestShardIndex_)
1589  latestShardIndex_ = boost::none;
1590 
1591  if (shard->index() == secondLatestShardIndex_)
1592  secondLatestShardIndex_ = boost::none;
1593 
1594  if ((shards_.erase(shard->index()) > 0) &&
1595  shard->getState() == Shard::final)
1596  {
1597  updateStatus(lock);
1598  }
1599  }
1600 
1601  shard->removeOnDestroy();
1602 
1603  // Reset the shared_ptr to invoke the shard's
1604  // destructor and remove it from the server
1605  shard.reset();
1606  setFileStats();
1607 }
1608 
1609 std::uint32_t
1610 DatabaseShardImp::shardBoundaryIndex() const
1611 {
1612  // Shards with an index earlier than the recent shard boundary index
1613  // are considered historical. The three shards at or later than
1614  // this index consist of the two most recently validated shards
1615  // and the shard still in the process of being built by live
1616  // transactions.
1617  auto const validIndex = app_.getLedgerMaster().getValidLedgerIndex();
1618  return std::max(earliestShardIndex() + 1, seqToShardIndex(validIndex)) -
1619  1;
1620 }
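Concretely, assuming the conventional mapping of a sequence s to shard index (s - 1) / ledgersPerShard_ and 16384 ledgers per shard: a validated ledger with sequence 1,700,000 falls in shard 103, so the boundary index is 102. Shard 102 and everything later count as recent, and every shard below 102 is historical.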
1621 
1622 std::uint32_t
1623 DatabaseShardImp::numHistoricalShards(
1624  std::lock_guard<std::mutex> const& lock) const
1625 {
1626  auto const boundaryIndex{shardBoundaryIndex()};
1627  return std::count_if(
1628  shards_.begin(), shards_.end(), [boundaryIndex](auto const& entry) {
1629  return entry.first < boundaryIndex;
1630  });
1631 }
1632 
1633 void
1634 DatabaseShardImp::relocateOutdatedShards(
1635  std::lock_guard<std::mutex> const& lock)
1636 {
1637  if (auto& cur = latestShardIndex_, &prev = secondLatestShardIndex_;
1638  cur || prev)
1639  {
1640  auto const latestShardIndex =
1641  seqToShardIndex(app_.getLedgerMaster().getValidLedgerIndex());
1642 
1643  auto const separateHistoricalPath = !historicalPaths_.empty();
1644 
1645  auto const removeShard =
1646  [this](std::uint32_t const shardIndex) -> void {
1647  canAdd_ = false;
1648 
1649  if (auto it = shards_.find(shardIndex); it != shards_.end())
1650  {
1651  if (it->second)
1652  removeFailedShard(it->second);
1653  else
1654  {
1655  JLOG(j_.warn()) << "can't find shard to remove";
1656  }
1657  }
1658  else
1659  {
1660  JLOG(j_.warn()) << "can't find shard to remove";
1661  }
1662  };
1663 
1664  auto const keepShard =
1665  [this, &lock, removeShard, separateHistoricalPath](
1666  std::uint32_t const shardIndex) -> bool {
1667  if (numHistoricalShards(lock) >= maxHistoricalShards_)
1668  {
1669  JLOG(j_.error())
1670  << "maximum number of historical shards reached";
1671 
1672  removeShard(shardIndex);
1673  return false;
1674  }
1675  if (separateHistoricalPath &&
1676  !sufficientStorage(1, PathDesignation::historical, lock))
1677  {
1678  JLOG(j_.error()) << "insufficient storage space available";
1679 
1680  removeShard(shardIndex);
1681  return false;
1682  }
1683 
1684  return true;
1685  };
1686 
1687  // Move a shard from the main shard path to a historical shard
1688  // path by copying the contents, and creating a new shard.
1689  auto const moveShard = [this,
1690  &lock](std::uint32_t const shardIndex) -> void {
1691  auto const dst = chooseHistoricalPath(lock);
1692 
1693  if (auto it = shards_.find(shardIndex); it != shards_.end())
1694  {
1695  auto& shard{it->second};
1696 
1697  // Close any open file descriptors before moving the shard
1698  // directory. Don't call removeOnDestroy since that would
1699  // attempt to close the fds after the directory has been moved.
1700  if (!shard->tryClose())
1701  {
1702  JLOG(j_.warn())
1703  << "can't close shard to move to historical path";
1704  return;
1705  }
1706 
1707  try
1708  {
1709  // Move the shard directory to the new path
1710  boost::filesystem::rename(
1711  shard->getDir().string(),
1712  dst / std::to_string(shardIndex));
1713  }
1714  catch (...)
1715  {
1716  JLOG(j_.error()) << "shard " << shardIndex
1717  << " failed to move to historical storage";
1718  return;
1719  }
1720 
1721  // Create a shard instance at the new location
1722  shard =
1723  std::make_shared<Shard>(app_, *this, shardIndex, dst, j_);
1724 
1725  // Open the new shard
1726  if (!shard->init(scheduler_, *ctx_))
1727  {
1728  JLOG(j_.error()) << "shard " << shardIndex
1729  << " failed to open in historical storage";
1730  shard->removeOnDestroy();
1731  shard.reset();
1732  }
1733  }
1734  else
1735  {
1736  JLOG(j_.warn())
1737  << "can't find shard to move to historical path";
1738  }
1739  };
1740 
1741  // See if either of the recent shards needs to be updated
1742  bool const curNotSynched =
1743  latestShardIndex_ && *latestShardIndex_ != latestShardIndex;
1744  bool const prevNotSynched = secondLatestShardIndex_ &&
1745  *secondLatestShardIndex_ != latestShardIndex - 1;
1746 
1747  // A new shard has been published. Move outdated
1748  // shards to historical storage as needed
1749  if (curNotSynched || prevNotSynched)
1750  {
1751  if (prev)
1752  {
1753  // Move the formerly second latest shard to historical storage
1754  if (keepShard(*prev) && separateHistoricalPath)
1755  {
1756  moveShard(*prev);
1757  }
1758 
1759  prev = boost::none;
1760  }
1761 
1762  if (cur)
1763  {
1764  // The formerly latest shard is now the second latest
1765  if (cur == latestShardIndex - 1)
1766  {
1767  prev = cur;
1768  }
1769 
1770  // The formerly latest shard is no longer a 'recent' shard
1771  else
1772  {
1773  // Move the formerly latest shard to historical storage
1774  if (keepShard(*cur) && separateHistoricalPath)
1775  {
1776  moveShard(*cur);
1777  }
1778  }
1779 
1780  cur = boost::none;
1781  }
1782  }
1783  }
1784 }
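To make the rotation concrete: suppose cur is 103 and prev is 102 when the validated ledger first lands in shard 104. Both slots are then out of sync, so shard 102 is checked by keepShard() and, if it survives, moved to a historical path and prev is cleared; next, because 103 == 104 - 1, prev becomes 103 and cur is cleared, leaving finalizeShard() to record 104 in the vacated slot.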
1785 
1786 auto
1787 DatabaseShardImp::prepareForNewShard(
1788  std::uint32_t shardIndex,
1789  std::uint32_t numHistoricalShards,
1790  std::lock_guard<std::mutex> const& lock) -> boost::optional<PathDesignation>
1791 {
1792  // Any shard earlier than the two most recent shards is a historical shard
1793  auto const boundaryIndex{shardBoundaryIndex()};
1794  auto const isHistoricalShard = shardIndex < boundaryIndex;
1795 
1796  auto const designation = isHistoricalShard && !historicalPaths_.empty()
1797  ? PathDesignation::historical
1798  : PathDesignation::none;
1799 
1800  // Check shard count and available storage space
1801  if (isHistoricalShard && numHistoricalShards >= maxHistoricalShards_)
1802  {
1803  JLOG(j_.error()) << "maximum number of historical shards reached";
1804  canAdd_ = false;
1805  return boost::none;
1806  }
1807  if (!sufficientStorage(1, designation, lock))
1808  {
1809  JLOG(j_.error()) << "insufficient storage space available";
1810  canAdd_ = false;
1811  return boost::none;
1812  }
1813 
1814  return designation;
1815 }
1816 
1817 boost::filesystem::path
1818 DatabaseShardImp::chooseHistoricalPath(std::lock_guard<std::mutex> const&) const
1819 {
1820  // If not configured with separate historical paths,
1821  // use the main path (dir_) by default.
1822  if (historicalPaths_.empty())
1823  return dir_;
1824 
1825  boost::filesystem::path historicalShardPath;
1826  std::vector<boost::filesystem::path> potentialPaths;
1827 
1828  for (boost::filesystem::path const& path : historicalPaths_)
1829  {
1830  if (boost::filesystem::space(path).available >= avgShardFileSz_)
1831  potentialPaths.push_back(path);
1832  }
1833 
1834  if (potentialPaths.empty())
1835  {
1836  JLOG(j_.error()) << "failed to select a historical shard path";
1837  return "";
1838  }
1839 
1840  std::sample(
1841  potentialPaths.begin(),
1842  potentialPaths.end(),
1843  &historicalShardPath,
1844  1,
1845  default_prng());
1846 
1847  return historicalShardPath;
1848 }
1849 
1850 bool
1851 DatabaseShardImp::checkHistoricalPaths() const
1852 {
1853 #if BOOST_OS_LINUX
1854  // Each historical shard path must correspond
1855  // to a directory on a distinct device or file system.
1856  // Currently, this constraint is enforced only on Linux.
1857  std::unordered_map<decltype(statvfs::f_fsid), std::vector<std::string>>
1858  filesystemIDs;
1859 
1860  for (auto const& path : historicalPaths_)
1861  {
1862  struct statvfs buffer;
1863  if (statvfs(path.c_str(), &buffer))
1864  {
1865  JLOG(j_.error())
1866  << "failed to acquire stats for 'historical_shard_path': "
1867  << path;
1868  return false;
1869  }
1870 
1871  filesystemIDs[buffer.f_fsid].push_back(path.string());
1872  }
1873 
1874  bool ret = true;
1875  for (auto const& entry : filesystemIDs)
1876  {
1877  // Check to see if any of the paths are stored on the same file system
1878  if (entry.second.size() > 1)
1879  {
1880  // Two or more historical storage paths
1881  // correspond to the same file system.
1882  JLOG(j_.error())
1883  << "The following paths correspond to the same filesystem: "
1884  << boost::algorithm::join(entry.second, ", ")
1885  << ". Each configured historical storage path should"
1886  " be on a unique device or filesystem.";
1887 
1888  ret = false;
1889  }
1890  }
1891 
1892  return ret;
1893 
1894 #else
1895  // The requirement that each historical storage path
1896  // corresponds to a distinct device or file system is
1897  // enforced only on Linux, so on other platforms
1898  // keep track of the available capacities for each
1899  // path. Issue a warning if we suspect any of the paths
1900  // may violate this requirement.
1901 
1902  // Map byte counts to each path that shares that byte count.
1903  std::unordered_map<std::uintmax_t, std::vector<std::string>>
1904  uniqueCapacities(historicalPaths_.size());
1905 
1906  for (auto const& path : historicalPaths_)
1907  uniqueCapacities[boost::filesystem::space(path).available].push_back(
1908  path.string());
1909 
1910  for (auto const& entry : uniqueCapacities)
1911  {
1912  // Check to see if any paths have the same amount of available bytes.
1913  if (entry.second.size() > 1)
1914  {
1915  // Two or more historical storage paths may
1916  // correspond to the same device or file system.
1917  JLOG(j_.warn())
1918  << "Each of the following paths have " << entry.first
1919  << " bytes free, and may be located on the same device"
1920  " or file system: "
1921  << boost::algorithm::join(entry.second, ", ")
1922  << ". Each configured historical storage path should"
1923  " be on a unique device or file system.";
1924  }
1925  }
1926 #endif
1927 
1928  return true;
1929 }
1930 
1931 //------------------------------------------------------------------------------
1932 
1933 std::unique_ptr<DatabaseShard>
1934 make_ShardStore(
1935  Application& app,
1936  Stoppable& parent,
1937  Scheduler& scheduler,
1938  int readThreads,
1939  beast::Journal j)
1940 {
1941  // The shard store is optional. Future changes will require it.
1942  Section const& section{
1943  app.config().section(ConfigSection::shardDatabase())};
1944  if (section.empty())
1945  return nullptr;
1946 
1947  return std::make_unique<DatabaseShardImp>(
1948  app, parent, "ShardStore", scheduler, readThreads, j);
1949 }
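A hypothetical wiring sketch for the factory, with app, parent and scheduler assumed to be in scope; a null return simply means no [shard_db] stanza is configured, and configuration errors surface through init():

    auto shards = NodeStore::make_ShardStore(
        app, parent, scheduler, 4 /* read threads */,
        app.journal("ShardStore"));
    if (shards && !shards->init())
        shards.reset();  // init() already logged the reason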
1950 
1951 } // namespace NodeStore
1952 } // namespace ripple
ripple::NodeStore::make_ShardStore
std::unique_ptr< DatabaseShard > make_ShardStore(Application &app, Stoppable &parent, Scheduler &scheduler, int readThreads, beast::Journal j)
Definition: DatabaseShardImp.cpp:1934
beast::Journal::fatal
Stream fatal() const
Definition: Journal.h:339
ripple::SizedItem::openFinalLimit
@ openFinalLimit
ripple::Section
Holds a collection of configuration values.
Definition: BasicConfig.h:43
ripple::loadLedgerHelper
std::tuple< std::shared_ptr< Ledger >, std::uint32_t, uint256 > loadLedgerHelper(std::string const &sqlSuffix, Application &app, bool acquire)
Definition: Ledger.cpp:1133
ripple::Application
Definition: Application.h:97
ripple::NodeStore::DatabaseShardImp::earliestShardIndex_
std::uint32_t earliestShardIndex_
Definition: DatabaseShardImp.h:223
ripple::NodeStore::DatabaseShardImp::ledgersPerShard_
std::uint32_t ledgersPerShard_
Definition: DatabaseShardImp.h:220
ripple::hotUNKNOWN
@ hotUNKNOWN
Definition: NodeObject.h:33
ripple::NodeStore::DatabaseShardImp::lastLedgerSeq
std::uint32_t lastLedgerSeq(std::uint32_t shardIndex) const override
Calculates the last ledger sequence for a given shard index.
Definition: DatabaseShardImp.h:108
ripple::NodeStore::DatabaseShardImp::earliestShardIndex
std::uint32_t earliestShardIndex() const override
Definition: DatabaseShardImp.h:86
ripple::makeSlice
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:240
ripple::NodeStore::DatabaseShardImp::mutex_
std::mutex mutex_
Definition: DatabaseShardImp.h:178
ripple::NodeStore::DatabaseShardImp::prepareLedger
boost::optional< std::uint32_t > prepareLedger(std::uint32_t validLedgerSeq) override
Prepare to store a new ledger in the shard being acquired.
Definition: DatabaseShardImp.cpp:228
ripple::NodeStore::DatabaseShardImp::app_
Application & app_
Definition: DatabaseShardImp.h:176
ripple::NodeStore::DatabaseShardImp::storeLedger
bool storeLedger(std::shared_ptr< Ledger const > const &srcLedger) override
Store a ledger from a different database.
Definition: DatabaseShardImp.cpp:1043
ripple::NodeStore::Database
Persistency layer for NodeObject.
Definition: Database.h:53
ripple::NodeStore::Shard::acquire
static constexpr State acquire
Definition: Shard.h:61
std::string
STL class.
std::shared_ptr< Ledger >
ripple::NodeStore::DatabaseShardImp::getDesiredAsyncReadCount
int getDesiredAsyncReadCount(std::uint32_t ledgerSeq) override
Get the maximum number of async reads the node store prefers.
Definition: DatabaseShardImp.cpp:1078
ripple::loadByIndex
std::shared_ptr< Ledger > loadByIndex(std::uint32_t ledgerIndex, Application &app, bool acquire)
Definition: Ledger.cpp:1225
ripple::SizedItem
SizedItem
Definition: Config.h:48
std::exception
STL class.
std::stoul
T stoul(T... args)
ripple::Stoppable::stopped
void stopped()
Called by derived classes to indicate that the stoppable has stopped.
Definition: Stoppable.cpp:72
ripple::NodeStore::DatabaseShardImp::PathDesignation
PathDesignation
Definition: DatabaseShardImp.h:171
beast::Journal::trace
Stream trace() const
Severity stream access functions.
Definition: Journal.h:309
ripple::Serializer::modData
Blob & modData()
Definition: Serializer.h:176
std::vector::reserve
T reserve(T... args)
ripple::NodeStore::DatabaseShardImp::removePreShard
void removePreShard(std::uint32_t shardIndex) override
Remove a previously prepared shard index for import.
Definition: DatabaseShardImp.cpp:405
ripple::LedgerMaster::getValidLedgerIndex
LedgerIndex getValidLedgerIndex()
Definition: LedgerMaster.cpp:212
ripple::NodeStore::Database::fdRequired_
int fdRequired_
Definition: Database.h:249
ripple::NodeStore::DatabaseShardImp::fileSz_
std::uint64_t fileSz_
Definition: DatabaseShardImp.h:215
ripple::InboundLedger::Reason::GENERIC
@ GENERIC
std::vector
STL class.
std::set::find
T find(T... args)
ripple::ConfigSection::shardDatabase
static std::string shardDatabase()
Definition: ConfigSections.h:38
std::vector::size
T size(T... args)
ripple::NodeObjectType
NodeObjectType
The types of node objects.
Definition: NodeObject.h:32
ripple::CashFilter::none
@ none
ripple::NodeObject::createObject
static std::shared_ptr< NodeObject > createObject(NodeObjectType type, Blob &&data, uint256 const &hash)
Create an object from fields.
Definition: NodeObject.cpp:37
ripple::NodeStore::DatabaseShardImp::taskQueue_
std::unique_ptr< TaskQueue > taskQueue_
Definition: DatabaseShardImp.h:185
ripple::NodeStore::DatabaseShardImp::setStored
void setStored(std::shared_ptr< Ledger const > const &ledger) override
Notifies the database that the given ledger has been fully acquired and stored.
Definition: DatabaseShardImp.cpp:616
ripple::NodeStore::DatabaseShardImp::updateStatus
void updateStatus(std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:1477
ripple::NodeStore::DatabaseShardImp::secondLatestShardIndex_
boost::optional< std::uint32_t > secondLatestShardIndex_
Definition: DatabaseShardImp.h:242
std::set::emplace
T emplace(T... args)
beast::Journal::warn
Stream warn() const
Definition: Journal.h:327
std::lock_guard
STL class.
ripple::kilobytes
constexpr auto kilobytes(T value) noexcept
Definition: ByteUtilities.h:27
ripple::NetworkOPs::getOperatingMode
virtual OperatingMode getOperatingMode() const =0
ripple::NodeStore::FetchReport
Contains information about a fetch operation.
Definition: ripple/nodestore/Scheduler.h:32
ripple::NodeStore::DatabaseShardImp::asyncFetch
bool asyncFetch(uint256 const &hash, std::uint32_t ledgerSeq, std::shared_ptr< NodeObject > &nodeObject) override
Fetch an object without waiting.
Definition: DatabaseShardImp.cpp:1018
ripple::LedgerMaster::walkHashBySeq
boost::optional< LedgerHash > walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason)
Walk to a ledger's hash using the skip list.
Definition: LedgerMaster.cpp:1604
ripple::NodeStore::DatabaseShardImp::finalizeShard
void finalizeShard(std::shared_ptr< Shard > &shard, bool writeSQLite, boost::optional< uint256 > const &expectedHash)
Definition: DatabaseShardImp.cpp:1340
boost
Definition: IPAddress.h:117
std::all_of
T all_of(T... args)
ripple::to_string
std::string to_string(ListDisposition disposition)
Definition: ValidatorList.cpp:42
ripple::NodeStore::Shard::finalKey
static const uint256 finalKey
Definition: Shard.h:217
ripple::NodeStore::Shard::complete
static constexpr State complete
Definition: Shard.h:62
std::vector::front
T front(T... args)
ripple::NodeStore::DatabaseShardImp::openFinalLimit_
const std::uint32_t openFinalLimit_
Definition: DatabaseShardImp.h:229
std::sort
T sort(T... args)
std::shared_ptr::reset
T reset(T... args)
ripple::SHAMapHash
Definition: SHAMapTreeNode.h:43
ripple::Application::getOPs
virtual NetworkOPs & getOPs()=0
ripple::NodeStore::DatabaseShardImp::sweep
void sweep() override
Remove expired entries from the positive and negative caches.
Definition: DatabaseShardImp.cpp:1113
ripple::Section::values
std::vector< std::string > const & values() const
Returns all the values in the section.
Definition: BasicConfig.h:76
std::string::clear
T clear(T... args)
ripple::send_always
Sends a message to all peers.
Definition: predicates.h:31
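A sketch of the broadcast idiom send_always supports together with Overlay::foreach (listed later in this index); constructing the Message itself is elided and assumed done by the caller:

#include <memory>
#include <ripple/overlay/Overlay.h>
#include <ripple/overlay/predicates.h>

// Hand every active peer the same message.
void
broadcast(ripple::Overlay& overlay, std::shared_ptr<ripple::Message> const& m)
{
    overlay.foreach(ripple::send_always(m));
}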
ripple::Stoppable::setParent
void setParent(Stoppable &parent)
Set the parent of this Stoppable.
Definition: Stoppable.cpp:43
ripple::NodeStore::DatabaseShardImp::PathDesignation::historical
@ historical
ripple::get_if_exists
bool get_if_exists(Section const &section, std::string const &name, T &v)
Definition: BasicConfig.h:347
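A sketch of get_if_exists() as configuration-reading code such as initConfig() would use it; the key name here is an assumption for illustration, not necessarily the store's actual setting:

#include <cstdint>
#include <ripple/basics/BasicConfig.h>

// Returns the configured value, or the default when the key is absent;
// get_if_exists() only assigns on a successful lookup.
std::uint32_t
readMaxHistorical(ripple::Section const& section)
{
    std::uint32_t maxHistoricalShards = 0;
    ripple::get_if_exists(section, "max_historical_shards", maxHistoricalShards);
    return maxHistoricalShards;
}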
ripple::deserializePrefixedHeader
LedgerInfo deserializePrefixedHeader(Slice data)
Deserialize a ledger header (prefixed with 4 bytes) from a byte array.
Definition: InboundLedger.cpp:292
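A sketch of decoding a fetched ledger-header blob; makeSlice(), the wrapper name, and the header locations are assumptions:

#include <ripple/app/ledger/InboundLedger.h>
#include <ripple/basics/Slice.h>
#include <ripple/ledger/ReadView.h>  // LedgerInfo

// The stored blob begins with a 4-byte HashPrefix; the deserializer
// consumes it and decodes the ledger header that follows.
ripple::LedgerInfo
decodeHeader(ripple::Blob const& blob)
{
    return ripple::deserializePrefixedHeader(ripple::makeSlice(blob));
}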
ripple::NodeStore::Shard::version
static constexpr std::uint32_t version
Definition: Shard.h:212
std::tie
T tie(T... args)
std::vector::push_back
T push_back(T... args)
ripple::NodeStore::DatabaseShardImp::firstLedgerSeq
std::uint32_t firstLedgerSeq(std::uint32_t shardIndex) const override
Calculates the first ledger sequence for a given shard index.
Definition: DatabaseShardImp.h:99
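A sketch of the boundary arithmetic behind firstLedgerSeq(), assuming the default 16384 ledgers per shard; the real member also special-cases the earliest shard, which starts at the network's earliest ledger sequence instead:

#include <cstdint>

// Shard i spans the closed ledger range [i * lps + 1, (i + 1) * lps].
constexpr std::uint32_t lps = 16384;
static_assert(1 + 1 * lps == 16385);  // first ledger of shard 1
static_assert(1 + 2 * lps == 32769);  // first ledger of shard 2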
ripple::NodeStore::DatabaseShardImp::fetchNodeObject
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq, FetchReport &fetchReport) override
Definition: DatabaseShardImp.cpp:1255
ripple::NodeStore::DatabaseShardImp::getCacheHitRate
float getCacheHitRate() override
Get the positive cache hits to total attempts ratio.
Definition: DatabaseShardImp.cpp:1096
ripple::NodeStore::DatabaseShardImp::avgShardFileSz_
std::uint64_t avgShardFileSz_
Definition: DatabaseShardImp.h:226
ripple::base_uint< 256 >
ripple::NodeStore::DatabaseShardImp::status_
std::string status_
Definition: DatabaseShardImp.h:203
std::sample
T sample(T... args)
ripple::NodeStore::DatabaseShardImp::getPreShards
std::string getPreShards() override
Get shard indexes being imported.
Definition: DatabaseShardImp.cpp:414
ripple::NodeStore::DatabaseShardImp::getWriteLoad
std::int32_t getWriteLoad() const override
Retrieve the estimated number of pending write operations.
Definition: DatabaseShardImp.cpp:967
ripple::NodeStore::TaskQueue
Definition: TaskQueue.h:32
ripple::OperatingMode::DISCONNECTED
@ DISCONNECTED
Not ready to process requests.
ripple::Stoppable
Provides an interface for starting and stopping.
Definition: Stoppable.h:201
ripple::NodeStore::DatabaseShardImp::chooseHistoricalPath
boost::filesystem::path chooseHistoricalPath(std::lock_guard< std::mutex > const &) const
Definition: DatabaseShardImp.cpp:1818
ripple::rand_int
std::enable_if_t< std::is_integral< Integral >::value &&detail::is_engine< Engine >::value, Integral > rand_int(Engine &engine, Integral min, Integral max)
Return a uniformly distributed random integer.
Definition: ripple/basics/random.h:115
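A usage sketch pairing rand_int() with default_prng() (also listed in this index); a uniform pick of a container slot is the common pattern:

#include <cstddef>
#include <ripple/basics/random.h>

// Uniform pick in [0, count - 1]; rand_int's bounds are inclusive.
// Caller must ensure count > 0.
std::size_t
randomSlot(std::size_t count)
{
    return ripple::rand_int(
        ripple::default_prng(), std::size_t{0}, count - 1);
}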
ripple::NodeStore::DatabaseShardImp::sufficientStorage
bool sufficientStorage(std::uint32_t numShards, PathDesignation pathDesignation, std::lock_guard< std::mutex > const &) const
Definition: DatabaseShardImp.cpp:1492
ripple::NodeStore::DatabaseShardImp::init_
bool init_
Definition: DatabaseShardImp.h:179
ripple::NodeStore::DatabaseShardImp::getCompleteShards
std::string getCompleteShards() override
Query which complete shards are stored.
Definition: DatabaseShardImp.cpp:678
ripple::NodeStore::DatabaseShardImp::seqToShardIndex
std::uint32_t seqToShardIndex(std::uint32_t ledgerSeq) const override
Calculates the shard index for a given ledger sequence.
Definition: DatabaseShardImp.h:92
ripple::Application::getLedgerMaster
virtual LedgerMaster & getLedgerMaster()=0
ripple::PublicKey
A public key.
Definition: PublicKey.h:59
ripple::Config
Definition: Config.h:69
std::ofstream
STL class.
ripple::Application::config
virtual Config & config()=0
ripple::NodeStore::DatabaseShardImp::dir_
boost::filesystem::path dir_
Definition: DatabaseShardImp.h:197
ripple::Config::standalone
bool standalone() const
Definition: Config.h:236
ripple::NodeStore::DatabaseShardImp::removeFailedShard
void removeFailedShard(std::shared_ptr< Shard > &shard)
Definition: DatabaseShardImp.cpp:1580
ripple::Application::nodeIdentity
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
ripple::Stoppable::areChildrenStopped
bool areChildrenStopped() const
Returns true if all children have stopped.
Definition: Stoppable.cpp:66
ripple::NodeStore::DatabaseShard
A collection of historical shards.
Definition: DatabaseShard.h:37
std::to_string
T to_string(T... args)
ripple::NodeStore::DatabaseShardImp::importShard
bool importShard(std::uint32_t shardIndex, boost::filesystem::path const &srcDir) override
Import a shard into the shard database.
Definition: DatabaseShardImp.cpp:432
ripple::default_prng
beast::xor_shift_engine & default_prng()
Return the default random engine.
Definition: ripple/basics/random.h:65
ripple::NodeStore::DatabaseShardImp::store
void store(NodeObjectType type, Blob &&data, uint256 const &hash, std::uint32_t ledgerSeq) override
Store the object.
Definition: DatabaseShardImp.cpp:984
ripple::NodeStore::DatabaseShardImp::PathDesignation::none
@ none
beast::Journal::error
Stream error() const
Definition: Journal.h:333
ripple::NodeStore::DatabaseShardImp::checkHistoricalPaths
bool checkHistoricalPaths() const
Definition: DatabaseShardImp.cpp:1851
std::set::erase
T erase(T... args)
ripple::NodeStore::DatabaseShardImp::initConfig
bool initConfig(std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:1167
ripple::ConfigSection
Definition: ConfigSections.h:28
ripple::NodeStore::DatabaseShardImp::latestShardIndex_
boost::optional< std::uint32_t > latestShardIndex_
Definition: DatabaseShardImp.h:241
beast::Journal
A generic endpoint for log messages.
Definition: Journal.h:58
ripple::NodeStore::seqToShardIndex
constexpr std::uint32_t seqToShardIndex(std::uint32_t ledgerSeq, std::uint32_t ledgersPerShard=DatabaseShard::ledgersPerShardDefault)
Definition: DatabaseShard.h:183
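A sketch of the arithmetic the default parameter implies, consistent with the firstLedgerSeq() boundaries shown earlier:

#include <cstdint>

// seqToShardIndex(seq) == (seq - 1) / ledgersPerShard over 1-based
// sequences, so each shard holds exactly 16384 consecutive ledgers.
constexpr std::uint32_t ledgersPerShard = 16384;
static_assert((16384u - 1) / ledgersPerShard == 0);  // last ledger of shard 0
static_assert((16385u - 1) / ledgersPerShard == 1);  // first ledger of shard 1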
std::uint32_t
ripple::NodeStore::Database::earliestLedgerSeq
std::uint32_t earliestLedgerSeq() const
Definition: Database.h:241
ripple::NodeStore::DatabaseShardImp::setFileStats
void setFileStats()
Definition: DatabaseShardImp.cpp:1428
ripple::NodeStore::DatabaseShardImp::acquireIndex_
std::uint32_t acquireIndex_
Definition: DatabaseShardImp.h:194
ripple::Overlay::foreach
void foreach(Function f) const
Visit every active peer.
Definition: Overlay.h:183
ripple::NodeStore::Scheduler
Scheduling for asynchronous backend activity.
Definition: ripple/nodestore/Scheduler.h:61
ripple::NodeStore::Database::fetchNodeObject
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq=0, FetchType fetchType=FetchType::synchronous)
Fetch a node object.
Definition: Database.cpp:158
std::transform
T transform(T... args)
ripple::NodeStore::Database::storeStats
void storeStats(std::uint64_t count, std::uint64_t sz)
Definition: Database.h:258
ripple::NodeStore::DatabaseShardImp::preparedIndexes_
std::set< std::uint32_t > preparedIndexes_
Definition: DatabaseShardImp.h:191
ripple::NodeStore::DatabaseShardImp::init
bool init() override
Initialize the database.
Definition: DatabaseShardImp.cpp:67
ripple::NodeStore::Shard::final
static constexpr State final
Definition: Shard.h:64
std::weak_ptr
STL class.
ripple::Serializer
Definition: Serializer.h:39
ripple::NodeStore::Database::stopReadThreads
void stopReadThreads()
Definition: Database.cpp:93
ripple::NodeStore::DatabaseShardImp::historicalPaths_
std::vector< boost::filesystem::path > historicalPaths_
Definition: DatabaseShardImp.h:212
ripple::NodeStore::DatabaseShardImp::parent_
Stoppable & parent_
Definition: DatabaseShardImp.h:177
ripple::NodeStore::DatabaseShardImp::findAcquireIndex
boost::optional< std::uint32_t > findAcquireIndex(std::uint32_t validLedgerSeq, std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:1274
ripple::NodeStore::DatabaseShardImp::importMarker_
static constexpr auto importMarker_
Definition: DatabaseShardImp.h:232
std::vector::emplace_back
T emplace_back(T... args)
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::Serializer::addBitString
int addBitString(base_uint< Bits, Tag > const &v)
Definition: Serializer.h:97
ripple::Application::getNodeStore
virtual NodeStore::Database & getNodeStore()=0
ripple::NodeStore::DatabaseShardImp::import
void import(Database &source) override
Import the application local node store.
Definition: DatabaseShardImp.cpp:734
ripple::NodeStore::DatabaseShardImp::maxHistoricalShards_
std::uint32_t maxHistoricalShards_
Definition: DatabaseShardImp.h:209
ripple::NodeStore::Database::asyncFetch
virtual bool asyncFetch(uint256 const &hash, std::uint32_t ledgerSeq, std::shared_ptr< NodeObject > &nodeObject)=0
Fetch an object without waiting.
ripple::Application::getShardFamily
virtual Family * getShardFamily()=0
ripple::NodeStore::Database::j_
const beast::Journal j_
Definition: Database.h:247
ripple::NodeStore::DatabaseShardImp::fetchLedger
std::shared_ptr< Ledger > fetchLedger(uint256 const &hash, std::uint32_t ledgerSeq) override
Fetch a ledger from the shard store.
Definition: DatabaseShardImp.cpp:538
std::vector::begin
T begin(T... args)
std
STL namespace.
ripple::NodeStore::DatabaseShardImp::numHistoricalShards
std::uint32_t numHistoricalShards(std::lock_guard< std::mutex > const &lock) const
Definition: DatabaseShardImp.cpp:1623
ripple::LedgerMaster::getCurrentLedgerIndex
LedgerIndex getCurrentLedgerIndex()
Definition: LedgerMaster.cpp:206
ripple::NodeStore::DatabaseShardImp::relocateOutdatedShards
void relocateOutdatedShards(std::lock_guard< std::mutex > const &lock)
Definition: DatabaseShardImp.cpp:1634
ripple::NodeStore::DatabaseShardImp::onChildrenStopped
void onChildrenStopped() override
Override called when all children have stopped.
Definition: DatabaseShardImp.cpp:700
ripple::Application::overlay
virtual Overlay & overlay()=0
ripple::NodeStore::DatabaseShardImp::shardBoundaryIndex
std::uint32_t shardBoundaryIndex() const
Definition: DatabaseShardImp.cpp:1610
std::count_if
T count_if(T... args)
std::vector::empty
T empty(T... args)
ripple::NodeStore::DatabaseShardImp::prepareShards
bool prepareShards(std::vector< std::uint32_t > const &shardIndexes) override
Prepare one or more shard indexes to be imported into the database.
Definition: DatabaseShardImp.cpp:296
std::unique
T unique(T... args)
beast::Journal::debug
Stream debug() const
Definition: Journal.h:315
ripple::getHashesByIndex
bool getHashesByIndex(std::uint32_t ledgerIndex, uint256 &ledgerHash, uint256 &parentHash, Application &app)
Definition: Ledger.cpp:1287
ripple::NodeStore::DatabaseShardImp::setStoredInShard
bool setStoredInShard(std::shared_ptr< Shard > &shard, std::shared_ptr< Ledger const > const &ledger)
Definition: DatabaseShardImp.cpp:1547
ripple::NodeStore::DatabaseShardImp::canAdd_
bool canAdd_
Definition: DatabaseShardImp.h:200
ripple::Serializer::add32
int add32(std::uint32_t i)
Definition: Serializer.cpp:38
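A sketch of composing a small binary record with Serializer, in the spirit of the shard final-key metadata; the field order shown is illustrative rather than the store's exact layout:

#include <cstdint>
#include <ripple/protocol/Serializer.h>

// Serializer appends 32-bit fields in network byte order; addBitString()
// appends the hash's raw 32 bytes.
ripple::Serializer
makeMetaRecord(std::uint32_t version, std::uint32_t firstSeq,
    std::uint32_t lastSeq, ripple::uint256 const& lastHash)
{
    ripple::Serializer s;
    s.add32(version);
    s.add32(firstSeq);
    s.add32(lastSeq);
    s.addBitString(lastHash);
    return s;
}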
std::vector::end
T end(T... args)
ripple::NodeStore::Database::scheduler_
Scheduler & scheduler_
Definition: Database.h:248
ripple::RangeSet
boost::icl::interval_set< T, std::less, ClosedInterval< T > > RangeSet
A set of closed intervals over the domain T.
Definition: RangeSet.h:69
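A sketch of RangeSet as ledger bookkeeping; ripple::range() building a ClosedInterval and the automatic coalescing of adjacent intervals are the properties assumed here:

#include <cstdint>
#include <ripple/basics/RangeSet.h>

// Track which ledger sequences are present; adjacent insertions merge.
bool
demoRangeSet()
{
    ripple::RangeSet<std::uint32_t> stored;
    stored.insert(ripple::range(1u, 100u));
    stored.insert(ripple::range(101u, 200u));  // coalesces into [1, 200]
    return boost::icl::contains(stored, 150u); // true
}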
std::max
T max(T... args)
ripple::NodeStore::DatabaseShardImp::shards_
std::unordered_map< std::uint32_t, std::shared_ptr< Shard > > shards_
Definition: DatabaseShardImp.h:188
ripple::NodeStore::DatabaseShardImp::DatabaseShardImp
DatabaseShardImp()=delete
std::unique_ptr
STL class.
std::unordered_map
STL class.
ripple::NodeStore::DatabaseShardImp::prepareForNewShard
boost::optional< PathDesignation > prepareForNewShard(std::uint32_t shardIndex, std::uint32_t numHistoricalShards, std::lock_guard< std::mutex > const &lock)
Definition: DatabaseShardImp.cpp:1787
ripple::ConfigSection::nodeDatabase
static std::string nodeDatabase()
Definition: ConfigSections.h:33
std::exception::what
T what(T... args)
ripple::NodeStore::DatabaseShardImp::onStop
void onStop() override
Override called when the stop notification is issued.
Definition: DatabaseShardImp.cpp:687
ripple::NodeStore::DatabaseShardImp::ctx_
std::unique_ptr< nudb::context > ctx_
Definition: DatabaseShardImp.h:182
ripple::BasicConfig::section
Section & section(std::string const &name)
Returns the section with the given name.
Definition: BasicConfig.cpp:138
ripple::Stoppable::isStopping
bool isStopping() const
Returns true if the stoppable should stop.
Definition: Stoppable.cpp:54
ripple::NodeStore::DatabaseShardImp::backendName_
std::string backendName_
Definition: DatabaseShardImp.h:206