rippled: DatabaseShardImp.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2017 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/app/ledger/InboundLedgers.h>
21 #include <ripple/app/ledger/LedgerMaster.h>
22 #include <ripple/app/misc/NetworkOPs.h>
23 #include <ripple/basics/ByteUtilities.h>
24 #include <ripple/basics/chrono.h>
25 #include <ripple/basics/random.h>
26 #include <ripple/core/ConfigSections.h>
27 #include <ripple/nodestore/DummyScheduler.h>
28 #include <ripple/nodestore/impl/DatabaseShardImp.h>
29 #include <ripple/overlay/Overlay.h>
30 #include <ripple/overlay/predicates.h>
31 #include <ripple/protocol/HashPrefix.h>
32 
33 #include <boost/algorithm/string/predicate.hpp>
34 
35 #if BOOST_OS_LINUX
36 #include <sys/statvfs.h>
37 #endif
38 
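// DatabaseShardImp implements the shard store: ledger history is divided
// into fixed-length shards (ledgersPerShard_ ledgers each, 16384 by
// default) that are acquired one at a time, finalized, and optionally
// relocated to dedicated historical paths as newer shards supersede them.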
39 namespace ripple {
40 namespace NodeStore {
41 
42 DatabaseShardImp::DatabaseShardImp(
43  Application& app,
44  Stoppable& parent,
45  std::string const& name,
46  Scheduler& scheduler,
47  int readThreads,
48  beast::Journal j)
49  : DatabaseShard(
50  name,
51  parent,
52  scheduler,
53  readThreads,
54  app.config().section(ConfigSection::shardDatabase()),
55  j)
56  , app_(app)
57  , parent_(parent)
58  , taskQueue_(std::make_unique<TaskQueue>(*this))
59  , earliestShardIndex_(seqToShardIndex(earliestLedgerSeq()))
60  , avgShardFileSz_(ledgersPerShard_ * kilobytes(192ull))
61  , openFinalLimit_(
62  app.config().getValueFor(SizedItem::openFinalLimit, boost::none))
63 {
64  if (app.config().reporting())
65  {
66  Throw<std::runtime_error>(
67  "Attempted to create DatabaseShardImp in reporting mode. Reporting "
68  "does not support shards. Remove shards info from config");
69  }
70 }
71 
72 bool
73 DatabaseShardImp::init()
74 {
75  {
76  std::lock_guard lock(mutex_);
77  if (init_)
78  {
79  JLOG(j_.error()) << "already initialized";
80  return false;
81  }
82 
83  if (!initConfig(lock))
84  {
85  JLOG(j_.error()) << "invalid configuration file settings";
86  return false;
87  }
88 
89  try
90  {
91  using namespace boost::filesystem;
92 
93  // Consolidate the main storage path and all historical paths
94  std::vector<path> paths{dir_};
95  paths.insert(
96  paths.end(), historicalPaths_.begin(), historicalPaths_.end());
97 
98  for (auto const& path : paths)
99  {
100  if (exists(path))
101  {
102  if (!is_directory(path))
103  {
104  JLOG(j_.error()) << path << " must be a directory";
105  return false;
106  }
107  }
108  else if (!create_directories(path))
109  {
110  JLOG(j_.error())
111  << "failed to create path: " + path.string();
112  return false;
113  }
114  }
115 
116  if (!historicalPaths_.empty())
117  {
118  // Check historical paths for duplicated file systems
119  if (!checkHistoricalPaths())
120  return false;
121  }
122 
123  ctx_ = std::make_unique<nudb::context>();
124  ctx_->start();
125 
126  // Find shards
127  std::uint32_t openFinals{0};
128  for (auto const& path : paths)
129  {
130  for (auto const& it : directory_iterator(path))
131  {
132  // Ignore files
133  if (!is_directory(it))
134  continue;
135 
136  // Ignore nonnumerical directory names
137  auto const shardDir{it.path()};
138  auto dirName{shardDir.stem().string()};
139  if (!std::all_of(
140  dirName.begin(), dirName.end(), [](auto c) {
141  return ::isdigit(static_cast<unsigned char>(c));
142  }))
143  {
144  continue;
145  }
146 
147  // Ignore values below the earliest shard index
148  auto const shardIndex{std::stoul(dirName)};
149  if (shardIndex < earliestShardIndex())
150  {
151  JLOG(j_.debug())
152  << "shard " << shardIndex
153  << " ignored, comes before earliest shard index "
154  << earliestShardIndex();
155  continue;
156  }
157 
158  // Check if a previous import failed
159  if (is_regular_file(shardDir / importMarker_))
160  {
161  JLOG(j_.warn())
162  << "shard " << shardIndex
163  << " previously failed import, removing";
164  remove_all(shardDir);
165  continue;
166  }
167 
168  auto shard{std::make_shared<Shard>(
169  app_, *this, shardIndex, shardDir.parent_path(), j_)};
170  if (!shard->init(scheduler_, *ctx_))
171  {
172  // Remove corrupted or legacy shard
173  shard->removeOnDestroy();
174  JLOG(j_.warn())
175  << "shard " << shardIndex << " removed, "
176  << (shard->isLegacy() ? "legacy" : "corrupted")
177  << " shard";
178  continue;
179  }
180 
181  switch (shard->getState())
182  {
183  case Shard::final:
184  if (++openFinals > openFinalLimit_)
185  shard->tryClose();
186  shards_.emplace(shardIndex, std::move(shard));
187  break;
188 
189  case Shard::complete:
190  finalizeShard(
191  shards_.emplace(shardIndex, std::move(shard))
192  .first->second,
193  true,
194  boost::none);
195  break;
196 
197  case Shard::acquire:
198  if (acquireIndex_ != 0)
199  {
200  JLOG(j_.error())
201  << "more than one shard being acquired";
202  return false;
203  }
204 
205  shards_.emplace(shardIndex, std::move(shard));
206  acquireIndex_ = shardIndex;
207  break;
208 
209  default:
210  JLOG(j_.error())
211  << "shard " << shardIndex << " invalid state";
212  return false;
213  }
214  }
215  }
216  }
217  catch (std::exception const& e)
218  {
219  JLOG(j_.fatal()) << "Exception caught in function " << __func__
220  << ". Error: " << e.what();
221  return false;
222  }
223 
224  updateStatus(lock);
225  setParent(parent_);
226  init_ = true;
227  }
228 
229  setFileStats();
230  return true;
231 }
232 
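// Returns the sequence of the next ledger needed by the shard being
// acquired, choosing a new shard index when no acquisition is in progress;
// returns boost::none when no shard can be added.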
233 boost::optional<std::uint32_t>
234 DatabaseShardImp::prepareLedger(std::uint32_t validLedgerSeq)
235 {
236  boost::optional<std::uint32_t> shardIndex;
237 
238  {
239  std::lock_guard lock(mutex_);
240  assert(init_);
241 
242  if (acquireIndex_ != 0)
243  {
244  if (auto const it{shards_.find(acquireIndex_)}; it != shards_.end())
245  return it->second->prepare();
246 
247  // Should never get here
248  assert(false);
249  return boost::none;
250  }
251 
252  if (!canAdd_)
253  return boost::none;
254 
255  shardIndex = findAcquireIndex(validLedgerSeq, lock);
256  }
257 
258  if (!shardIndex)
259  {
260  JLOG(j_.debug()) << "no new shards to add";
261  {
262  std::lock_guard lock(mutex_);
263  canAdd_ = false;
264  }
265  return boost::none;
266  }
267 
268  auto const pathDesignation = [this, shardIndex = *shardIndex]() {
269  std::lock_guard lock(mutex_);
270  return prepareForNewShard(shardIndex, numHistoricalShards(lock), lock);
271  }();
272 
273  if (!pathDesignation)
274  return boost::none;
275 
276  auto const needsHistoricalPath =
277  *pathDesignation == PathDesignation::historical;
278 
279  auto shard = [this, shardIndex, needsHistoricalPath] {
280  std::lock_guard lock(mutex_);
281  return std::make_unique<Shard>(
282  app_,
283  *this,
284  *shardIndex,
285  (needsHistoricalPath ? chooseHistoricalPath(lock) : ""),
286  j_);
287  }();
288 
289  if (!shard->init(scheduler_, *ctx_))
290  return boost::none;
291 
292  auto const ledgerSeq{shard->prepare()};
293  {
294  std::lock_guard lock(mutex_);
295  shards_.emplace(*shardIndex, std::move(shard));
296  acquireIndex_ = *shardIndex;
297  }
298  return ledgerSeq;
299 }
300 
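// Validates a batch of shard indexes for import: each index must come after
// the earliest shard index, precede the current and validated ledgers, and
// be neither stored nor already queued; the batch must also fit within the
// historical shard and storage limits before being reserved in
// preparedIndexes_.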
301 bool
302 DatabaseShardImp::prepareShards(std::vector<std::uint32_t> const& shardIndexes)
303 {
304  auto fail = [j = j_, &shardIndexes](
305  std::string const& msg,
306  boost::optional<std::uint32_t> shardIndex = boost::none) {
307  auto multipleIndexPrequel = [&shardIndexes] {
308  std::vector<std::string> indexesAsString(shardIndexes.size());
309  std::transform(
310  shardIndexes.begin(),
311  shardIndexes.end(),
312  indexesAsString.begin(),
313  [](uint32_t const index) { return std::to_string(index); });
314 
315  return std::string("shard") +
316  (shardIndexes.size() > 1 ? "s " : " ") +
317  boost::algorithm::join(indexesAsString, ", ");
318  };
319 
320  std::string const prequel = shardIndex
321  ? "shard " + std::to_string(*shardIndex)
322  : multipleIndexPrequel();
323 
324  JLOG(j.error()) << prequel << " " << msg;
325  return false;
326  };
327 
328  std::lock_guard lock(mutex_);
329  assert(init_);
330 
331  if (!canAdd_)
332  return fail("cannot be stored at this time");
333 
334  auto historicalShardsToPrepare = 0;
335 
336  for (auto const shardIndex : shardIndexes)
337  {
338  if (shardIndex < earliestShardIndex())
339  {
340  return fail(
341  "comes before earliest shard index " +
343  shardIndex);
344  }
345 
346  // If we are synced to the network, check if the shard index is
347  // greater than or equal to the current or validated shard index.
348  auto seqCheck = [&](std::uint32_t ledgerSeq) {
349  if (ledgerSeq >= earliestLedgerSeq() &&
350  shardIndex >= seqToShardIndex(ledgerSeq))
351  {
352  return fail("invalid index", shardIndex);
353  }
354  return true;
355  };
356  if (!seqCheck(app_.getLedgerMaster().getValidLedgerIndex() + 1) ||
357  !seqCheck(app_.getLedgerMaster().getCurrentLedgerIndex()))
358  {
359  return fail("invalid index", shardIndex);
360  }
361 
362  if (shards_.find(shardIndex) != shards_.end())
363  return fail("is already stored", shardIndex);
364 
365  if (preparedIndexes_.find(shardIndex) != preparedIndexes_.end())
366  return fail("is already queued for import", shardIndex);
367 
368  // Any shard earlier than the two most recent shards
369  // is a historical shard
370  if (shardIndex < shardBoundaryIndex())
371  ++historicalShardsToPrepare;
372  }
373 
374  auto const numHistShards = numHistoricalShards(lock);
375 
376  // Check shard count and available storage space
377  if (numHistShards + historicalShardsToPrepare > maxHistoricalShards_)
378  return fail("maximum number of historical shards reached");
379 
380  if (historicalShardsToPrepare)
381  {
382  // Check available storage space for historical shards
383  if (!sufficientStorage(
384  historicalShardsToPrepare, PathDesignation::historical, lock))
385  return fail("insufficient storage space available");
386  }
387 
388  if (auto const recentShardsToPrepare =
389  shardIndexes.size() - historicalShardsToPrepare;
390  recentShardsToPrepare)
391  {
392  // Check available storage space for recent shards
393  if (!sufficientStorage(
394  recentShardsToPrepare, PathDesignation::none, lock))
395  return fail("insufficient storage space available");
396  }
397 
398  for (auto const shardIndex : shardIndexes)
399  {
400  auto const prepareSuccessful =
401  preparedIndexes_.emplace(shardIndex).second;
402 
403  (void)prepareSuccessful;
404  assert(prepareSuccessful);
405  }
406 
407  return true;
408 }
409 
410 void
411 DatabaseShardImp::removePreShard(std::uint32_t shardIndex)
412 {
413  std::lock_guard lock(mutex_);
414  assert(init_);
415 
416  preparedIndexes_.erase(shardIndex);
417 }
418 
419 std::string
420 DatabaseShardImp::getPreShards()
421 {
422  RangeSet<std::uint32_t> rs;
423  {
424  std::lock_guard lock(mutex_);
425  assert(init_);
426 
427  for (auto const& shardIndex : preparedIndexes_)
428  rs.insert(shardIndex);
429  }
430 
431  if (rs.empty())
432  return {};
433 
434  return to_string(rs);
435 };
436 
437 bool
438 DatabaseShardImp::importShard(
439  std::uint32_t shardIndex,
440  boost::filesystem::path const& srcDir)
441 {
442  auto fail = [&](std::string const& msg,
443  std::lock_guard<std::mutex> const& lock) {
444  JLOG(j_.error()) << "shard " << shardIndex << " " << msg;
445 
446  // Remove the failed import shard index so it can be retried
447  preparedIndexes_.erase(shardIndex);
448  return false;
449  };
450 
451  using namespace boost::filesystem;
452  try
453  {
454  if (!is_directory(srcDir) || is_empty(srcDir))
455  {
456  return fail(
457  "invalid source directory " + srcDir.string(),
459  }
460  }
461  catch (std::exception const& e)
462  {
463  return fail(
464  std::string(". Exception caught in function ") + __func__ +
465  ". Error: " + e.what(),
467  }
468 
469  auto const expectedHash{app_.getLedgerMaster().walkHashBySeq(
470  lastLedgerSeq(shardIndex), InboundLedger::Reason::GENERIC)};
471  if (!expectedHash)
472  return fail("expected hash not found", std::lock_guard(mutex_));
473 
474  path dstDir;
475  {
476  std::lock_guard lock(mutex_);
477  if (shards_.find(shardIndex) != shards_.end())
478  return fail("already exists", lock);
479 
480  // Check shard was prepared for import
481  if (preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
482  return fail("was not prepared for import", lock);
483 
484  auto const pathDesignation{
485  prepareForNewShard(shardIndex, numHistoricalShards(lock), lock)};
486  if (!pathDesignation)
487  return fail("failed to import", lock);
488 
489  if (*pathDesignation == PathDesignation::historical)
490  dstDir = chooseHistoricalPath(lock);
491  else
492  dstDir = dir_;
493  }
494  dstDir /= std::to_string(shardIndex);
495 
496  auto renameDir = [&](path const& src, path const& dst) {
497  try
498  {
499  rename(src, dst);
500  }
501  catch (std::exception const& e)
502  {
503  return fail(
504  std::string(". Exception caught in function ") + __func__ +
505  ". Error: " + e.what(),
507  }
508  return true;
509  };
510 
511  // Rename source directory to the shard database directory
512  if (!renameDir(srcDir, dstDir))
513  return false;
514 
515  // Create the new shard
516  auto shard{std::make_unique<Shard>(
517  app_, *this, shardIndex, dstDir.parent_path(), j_)};
518 
519  if (!shard->init(scheduler_, *ctx_) || shard->getState() != Shard::complete)
520  {
521  shard.reset();
522  renameDir(dstDir, srcDir);
523  return fail("failed to import", std::lock_guard(mutex_));
524  }
525 
526  auto const [it, inserted] = [&]() {
527  std::lock_guard lock(mutex_);
528  preparedIndexes_.erase(shardIndex);
529  return shards_.emplace(shardIndex, std::move(shard));
530  }();
531 
532  if (!inserted)
533  {
534  shard.reset();
535  renameDir(dstDir, srcDir);
536  return fail("failed to import", std::lock_guard(mutex_));
537  }
538 
539  finalizeShard(it->second, true, expectedHash);
540  return true;
541 }
542 
543 Backend&
544 DatabaseShardImp::getBackend()
545 {
546  return app_.getNodeStore().getBackend();
547 }
548 
549 std::shared_ptr<Ledger>
550 DatabaseShardImp::fetchLedger(uint256 const& hash, std::uint32_t ledgerSeq)
551 {
552  auto const shardIndex{seqToShardIndex(ledgerSeq)};
553  {
554  std::shared_ptr<Shard> shard;
555  {
556  std::lock_guard lock(mutex_);
557  assert(init_);
558 
559  auto const it{shards_.find(shardIndex)};
560  if (it == shards_.end())
561  return nullptr;
562  shard = it->second;
563  }
564 
565  // Ledger must be stored in a final or acquiring shard
566  switch (shard->getState())
567  {
568  case Shard::final:
569  break;
570  case Shard::acquire:
571  if (shard->containsLedger(ledgerSeq))
572  break;
573  [[fallthrough]];
574  default:
575  return nullptr;
576  }
577  }
578 
579  auto const nodeObject{Database::fetchNodeObject(hash, ledgerSeq)};
580  if (!nodeObject)
581  return nullptr;
582 
583  auto fail = [&](std::string const& msg) -> std::shared_ptr<Ledger> {
584  JLOG(j_.error()) << "shard " << shardIndex << " " << msg;
585  return nullptr;
586  };
587 
588  auto ledger{std::make_shared<Ledger>(
589  deserializePrefixedHeader(makeSlice(nodeObject->getData())),
590  app_.config(),
591  *app_.getShardFamily())};
592 
593  if (ledger->info().seq != ledgerSeq)
594  {
595  return fail(
596  "encountered invalid ledger sequence " + std::to_string(ledgerSeq));
597  }
598  if (ledger->info().hash != hash)
599  {
600  return fail(
601  "encountered invalid ledger hash " + to_string(hash) +
602  " on sequence " + std::to_string(ledgerSeq));
603  }
604 
605  ledger->setFull();
606  if (!ledger->stateMap().fetchRoot(
607  SHAMapHash{ledger->info().accountHash}, nullptr))
608  {
609  return fail(
610  "is missing root STATE node on hash " + to_string(hash) +
611  " on sequence " + std::to_string(ledgerSeq));
612  }
613 
614  if (ledger->info().txHash.isNonZero())
615  {
616  if (!ledger->txMap().fetchRoot(
617  SHAMapHash{ledger->info().txHash}, nullptr))
618  {
619  return fail(
620  "is missing root TXN node on hash " + to_string(hash) +
621  " on sequence " + std::to_string(ledgerSeq));
622  }
623  }
624  return ledger;
625 }
626 
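// Called when a ledger has been fully acquired; only ledgers belonging to
// the shard currently being acquired (acquireIndex_) are accepted.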
627 void
628 DatabaseShardImp::setStored(std::shared_ptr<Ledger const> const& ledger)
629 {
630  auto const ledgerSeq{ledger->info().seq};
631  if (ledger->info().hash.isZero())
632  {
633  JLOG(j_.error()) << "zero ledger hash for ledger sequence "
634  << ledgerSeq;
635  return;
636  }
637  if (ledger->info().accountHash.isZero())
638  {
639  JLOG(j_.error()) << "zero account hash for ledger sequence "
640  << ledgerSeq;
641  return;
642  }
643  if (ledger->stateMap().getHash().isNonZero() &&
644  !ledger->stateMap().isValid())
645  {
646  JLOG(j_.error()) << "invalid state map for ledger sequence "
647  << ledgerSeq;
648  return;
649  }
650  if (ledger->info().txHash.isNonZero() && !ledger->txMap().isValid())
651  {
652  JLOG(j_.error()) << "invalid transaction map for ledger sequence "
653  << ledgerSeq;
654  return;
655  }
656 
657  auto const shardIndex{seqToShardIndex(ledgerSeq)};
658  std::shared_ptr<Shard> shard;
659  {
660  std::lock_guard lock(mutex_);
661  assert(init_);
662 
663  if (shardIndex != acquireIndex_)
664  {
665  JLOG(j_.trace())
666  << "shard " << shardIndex << " is not being acquired";
667  return;
668  }
669 
670  auto const it{shards_.find(shardIndex)};
671  if (it == shards_.end())
672  {
673  JLOG(j_.error())
674  << "shard " << shardIndex << " is not being acquired";
675  return;
676  }
677  shard = it->second;
678  }
679 
680  if (shard->containsLedger(ledgerSeq))
681  {
682  JLOG(j_.trace()) << "shard " << shardIndex << " ledger already stored";
683  return;
684  }
685 
686  setStoredInShard(shard, ledger);
687 }
688 
689 std::string
690 DatabaseShardImp::getCompleteShards()
691 {
692  std::lock_guard lock(mutex_);
693  assert(init_);
694 
695  return status_;
696 }
697 
698 void
699 DatabaseShardImp::onStop()
700 {
701  // Stop read threads in base before data members are destroyed
702  stopReadThreads();
703 
704  std::lock_guard lock(mutex_);
705 
706  // Notify shards to stop
707  for (auto const& e : shards_)
708  e.second->stop();
709 }
710 
711 void
712 DatabaseShardImp::onChildrenStopped()
713 {
714  std::vector<std::weak_ptr<Shard>> shards;
715  {
716  std::lock_guard lock(mutex_);
717 
718  shards.reserve(shards_.size());
719  for (auto const& e : shards_)
720  shards.push_back(e.second);
721  shards_.clear();
722  }
723 
724  // All shards should be expired at this point
725  for (auto const& e : shards)
726  {
727  if (!e.expired())
728  {
729  std::string shardIndex;
730  if (auto const shard{e.lock()}; shard)
731  shardIndex = std::to_string(shard->index());
732 
733  JLOG(j_.warn()) << "shard " << shardIndex << " unexpired";
734  }
735  }
736 
737  stopped();
738 }
739 
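// Imports complete shards from the local node store: candidate indexes are
// derived from the ledgers present in the SQLite database, spot-checked
// against the node store, copied ledger by ledger under an import marker
// file, and finalized on success.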
740 void
741 DatabaseShardImp::import(Database& source)
742 {
743  {
744  std::lock_guard lock(mutex_);
745  assert(init_);
746 
747  // Only the application local node store can be imported
748  if (&source != &app_.getNodeStore())
749  {
750  assert(false);
751  JLOG(j_.error()) << "invalid source database";
752  return;
753  }
754 
755  std::uint32_t earliestIndex;
756  std::uint32_t latestIndex;
757  {
758  auto loadLedger = [&](bool ascendSort =
759  true) -> boost::optional<std::uint32_t> {
760  std::shared_ptr<Ledger> ledger;
761  std::uint32_t ledgerSeq;
762  std::tie(ledger, ledgerSeq, std::ignore) = loadLedgerHelper(
763  "WHERE LedgerSeq >= " +
765  " order by LedgerSeq " + (ascendSort ? "asc" : "desc") +
766  " limit 1",
767  app_,
768  false);
769  if (!ledger || ledgerSeq == 0)
770  {
771  JLOG(j_.error()) << "no suitable ledgers were found in"
772  " the SQLite database to import";
773  return boost::none;
774  }
775  return ledgerSeq;
776  };
777 
778  // Find earliest ledger sequence stored
779  auto ledgerSeq{loadLedger()};
780  if (!ledgerSeq)
781  return;
782  earliestIndex = seqToShardIndex(*ledgerSeq);
783 
784  // Consider only complete shards
785  if (ledgerSeq != firstLedgerSeq(earliestIndex))
786  ++earliestIndex;
787 
788  // Find last ledger sequence stored
789  ledgerSeq = loadLedger(false);
790  if (!ledgerSeq)
791  return;
792  latestIndex = seqToShardIndex(*ledgerSeq);
793 
794  // Consider only complete shards
795  if (ledgerSeq != lastLedgerSeq(latestIndex))
796  --latestIndex;
797 
798  if (latestIndex < earliestIndex)
799  {
800  JLOG(j_.error()) << "no suitable ledgers were found in"
801  " the SQLite database to import";
802  return;
803  }
804  }
805 
806  auto numHistShards = this->numHistoricalShards(lock);
807 
808  // Import the shards
809  for (std::uint32_t shardIndex = earliestIndex;
810  shardIndex <= latestIndex;
811  ++shardIndex)
812  {
813  auto const pathDesignation =
814  prepareForNewShard(shardIndex, numHistShards, lock);
815 
816  if (!pathDesignation)
817  break;
818 
819  auto const needsHistoricalPath =
820  *pathDesignation == PathDesignation::historical;
821 
822  // Skip if being acquired
823  if (shardIndex == acquireIndex_)
824  {
825  JLOG(j_.debug())
826  << "shard " << shardIndex << " already being acquired";
827  continue;
828  }
829 
830  // Skip if being imported
831  if (preparedIndexes_.find(shardIndex) != preparedIndexes_.end())
832  {
833  JLOG(j_.debug())
834  << "shard " << shardIndex << " already being imported";
835  continue;
836  }
837 
838  // Skip if stored
839  if (shards_.find(shardIndex) != shards_.end())
840  {
841  JLOG(j_.debug()) << "shard " << shardIndex << " already stored";
842  continue;
843  }
844 
845  // Verify SQLite ledgers are in the node store
846  {
847  auto const firstSeq{firstLedgerSeq(shardIndex)};
848  auto const lastSeq{
849  std::max(firstSeq, lastLedgerSeq(shardIndex))};
850  auto const numLedgers{
851  shardIndex == earliestShardIndex() ? lastSeq - firstSeq + 1
852  : ledgersPerShard_};
853  auto ledgerHashes{getHashesByIndex(firstSeq, lastSeq, app_)};
854  if (ledgerHashes.size() != numLedgers)
855  continue;
856 
857  bool valid{true};
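 // Spot-check every 256th ledger hash against the node store; a
 // mismatch anywhere disqualifies this shard from import.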
858  for (std::uint32_t n = firstSeq; n <= lastSeq; n += 256)
859  {
860  if (!source.fetchNodeObject(ledgerHashes[n].first, n))
861  {
862  JLOG(j_.warn()) << "SQLite ledger sequence " << n
863  << " mismatches node store";
864  valid = false;
865  break;
866  }
867  }
868  if (!valid)
869  continue;
870  }
871 
872  auto const path =
873  needsHistoricalPath ? chooseHistoricalPath(lock) : dir_;
874 
875  // Create the new shard
876  auto shard{
877  std::make_unique<Shard>(app_, *this, shardIndex, path, j_)};
878  if (!shard->init(scheduler_, *ctx_))
879  continue;
880 
881  // Create a marker file to signify an import in progress
882  auto const shardDir{path / std::to_string(shardIndex)};
883  auto const markerFile{shardDir / importMarker_};
884  {
885  std::ofstream ofs{markerFile.string()};
886  if (!ofs.is_open())
887  {
888  JLOG(j_.error()) << "shard " << shardIndex
889  << " failed to create temp marker file";
890  shard->removeOnDestroy();
891  continue;
892  }
893  ofs.close();
894  }
895 
896  // Copy the ledgers from node store
897  std::shared_ptr<Ledger> recentStored;
898  boost::optional<uint256> lastLedgerHash;
899 
900  while (auto const ledgerSeq = shard->prepare())
901  {
902  auto ledger{loadByIndex(*ledgerSeq, app_, false)};
903  if (!ledger || ledger->info().seq != ledgerSeq)
904  break;
905 
906  auto const result{shard->storeLedger(ledger, recentStored)};
907  storeStats(result.count, result.size);
908  if (result.error)
909  break;
910 
911  if (!shard->setLedgerStored(ledger))
912  break;
913 
914  if (!lastLedgerHash && ledgerSeq == lastLedgerSeq(shardIndex))
915  lastLedgerHash = ledger->info().hash;
916 
917  recentStored = std::move(ledger);
918  }
919 
920  using namespace boost::filesystem;
921  bool success{false};
922  if (lastLedgerHash && shard->getState() == Shard::complete)
923  {
924  // Store shard final key
925  Serializer s;
926  s.add32(Shard::version);
927  s.add32(firstLedgerSeq(shardIndex));
928  s.add32(lastLedgerSeq(shardIndex));
929  s.addBitString(*lastLedgerHash);
930  auto const nodeObject{NodeObject::createObject(
931  hotUNKNOWN, std::move(s.modData()), Shard::finalKey)};
932 
933  if (shard->storeNodeObject(nodeObject))
934  {
935  try
936  {
937  // The import process is complete and the
938  // marker file is no longer required
939  remove_all(markerFile);
940 
941  JLOG(j_.debug()) << "shard " << shardIndex
942  << " was successfully imported";
944  shards_.emplace(shardIndex, std::move(shard))
945  .first->second,
946  true,
947  boost::none);
948  success = true;
949 
950  if (shardIndex < shardBoundaryIndex())
951  ++numHistShards;
952  }
953  catch (std::exception const& e)
954  {
955  JLOG(j_.fatal()) << "shard index " << shardIndex
956  << ". Exception caught in function "
957  << __func__ << ". Error: " << e.what();
958  }
959  }
960  }
961 
962  if (!success)
963  {
964  JLOG(j_.error())
965  << "shard " << shardIndex << " failed to import";
966  shard->removeOnDestroy();
967  }
968  }
969 
970  updateStatus(lock);
971  }
972 
973  setFileStats();
974 }
975 
976 std::int32_t
977 DatabaseShardImp::getWriteLoad() const
978 {
979  std::shared_ptr<Shard> shard;
980  {
981  std::lock_guard lock(mutex_);
982  assert(init_);
983 
984  auto const it{shards_.find(acquireIndex_)};
985  if (it == shards_.end())
986  return 0;
987  shard = it->second;
988  }
989 
990  return shard->getWriteLoad();
991 }
992 
993 void
994 DatabaseShardImp::store(
995  NodeObjectType type,
996  Blob&& data,
997  uint256 const& hash,
998  std::uint32_t ledgerSeq)
999 {
1000  auto const shardIndex{seqToShardIndex(ledgerSeq)};
1001  std::shared_ptr<Shard> shard;
1002  {
1003  std::lock_guard lock(mutex_);
1004  if (shardIndex != acquireIndex_)
1005  {
1006  JLOG(j_.trace())
1007  << "shard " << shardIndex << " is not being acquired";
1008  return;
1009  }
1010 
1011  auto const it{shards_.find(shardIndex)};
1012  if (it == shards_.end())
1013  {
1014  JLOG(j_.error())
1015  << "shard " << shardIndex << " is not being acquired";
1016  return;
1017  }
1018  shard = it->second;
1019  }
1020 
1021  auto const nodeObject{
1022  NodeObject::createObject(type, std::move(data), hash)};
1023  if (shard->storeNodeObject(nodeObject))
1024  storeStats(1, nodeObject->getData().size());
1025 }
1026 
1027 bool
1028 DatabaseShardImp::storeLedger(std::shared_ptr<Ledger const> const& srcLedger)
1029 {
1030  auto const ledgerSeq{srcLedger->info().seq};
1031  auto const shardIndex{seqToShardIndex(ledgerSeq)};
1032  std::shared_ptr<Shard> shard;
1033  {
1034  std::lock_guard lock(mutex_);
1035  assert(init_);
1036 
1037  if (shardIndex != acquireIndex_)
1038  {
1039  JLOG(j_.trace())
1040  << "shard " << shardIndex << " is not being acquired";
1041  return false;
1042  }
1043 
1044  auto const it{shards_.find(shardIndex)};
1045  if (it == shards_.end())
1046  {
1047  JLOG(j_.error())
1048  << "shard " << shardIndex << " is not being acquired";
1049  return false;
1050  }
1051  shard = it->second;
1052  }
1053 
1054  auto const result{shard->storeLedger(srcLedger, nullptr)};
1055  storeStats(result.count, result.size);
1056  if (result.error || result.count == 0 || result.size == 0)
1057  return false;
1058 
1059  return setStoredInShard(shard, srcLedger);
1060 }
1061 
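// Sweeps the caches of each open shard and, when more final shards are open
// than openFinalLimit_ allows, closes the least recently used ones.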
1062 void
1063 DatabaseShardImp::sweep()
1064 {
1065  std::vector<std::weak_ptr<Shard>> shards;
1066  {
1067  std::lock_guard lock(mutex_);
1068  assert(init_);
1069 
1070  shards.reserve(shards_.size());
1071  for (auto const& e : shards_)
1072  shards.push_back(e.second);
1073  }
1074 
1075  std::vector<std::shared_ptr<Shard>> openFinals;
1076  openFinals.reserve(openFinalLimit_);
1077 
1078  for (auto const& e : shards)
1079  {
1080  if (auto const shard{e.lock()}; shard && shard->isOpen())
1081  {
1082  shard->sweep();
1083 
1084  if (shard->getState() == Shard::final)
1085  openFinals.emplace_back(std::move(shard));
1086  }
1087  }
1088 
1089  if (openFinals.size() > openFinalLimit_)
1090  {
1091  JLOG(j_.trace()) << "Open shards exceed configured limit of "
1092  << openFinalLimit_ << " by "
1093  << (openFinals.size() - openFinalLimit_);
1094 
1095  // Try to close enough shards to be within the limit.
1096  // Sort ascending on last use so the oldest are removed first.
1097  std::sort(
1098  openFinals.begin(),
1099  openFinals.end(),
1100  [&](std::shared_ptr<Shard> const& lhsShard,
1101  std::shared_ptr<Shard> const& rhsShard) {
1102  return lhsShard->getLastUse() < rhsShard->getLastUse();
1103  });
1104 
1105  for (auto it{openFinals.cbegin()};
1106  it != openFinals.cend() && openFinals.size() > openFinalLimit_;)
1107  {
1108  if ((*it)->tryClose())
1109  it = openFinals.erase(it);
1110  else
1111  ++it;
1112  }
1113  }
1114 }
1115 
1116 bool
1117 DatabaseShardImp::initConfig(std::lock_guard<std::mutex> const&)
1118 {
1119  auto fail = [j = j_](std::string const& msg) {
1120  JLOG(j.error()) << "[" << ConfigSection::shardDatabase() << "] " << msg;
1121  return false;
1122  };
1123 
1124  Config const& config{app_.config()};
1125  Section const& section{config.section(ConfigSection::shardDatabase())};
1126 
1127  {
1128  // The earliest ledger sequence defaults to XRP_LEDGER_EARLIEST_SEQ.
1129  // A custom earliest ledger sequence can be set through the
1130  // configuration file using the 'earliest_seq' field under the
1131  // 'node_db' and 'shard_db' stanzas. If specified, this field must
1132  // have a value greater than zero and be equally assigned in
1133  // both stanzas.
1134 
1135  std::uint32_t shardDBEarliestSeq{0};
1136  get_if_exists<std::uint32_t>(
1137  section, "earliest_seq", shardDBEarliestSeq);
1138 
1139  std::uint32_t nodeDBEarliestSeq{0};
1140  get_if_exists<std::uint32_t>(
1141  config.section(ConfigSection::nodeDatabase()),
1142  "earliest_seq",
1143  nodeDBEarliestSeq);
1144 
1145  if (shardDBEarliestSeq != nodeDBEarliestSeq)
1146  {
1147  return fail(
1148  "and [" + ConfigSection::nodeDatabase() +
1149  "] define different 'earliest_seq' values");
1150  }
1151  }
1152 
1153  using namespace boost::filesystem;
1154  if (!get_if_exists<path>(section, "path", dir_))
1155  return fail("'path' missing");
1156 
1157  {
1158  get_if_exists(section, "max_historical_shards", maxHistoricalShards_);
1159 
1160  Section const& historicalShardPaths =
1161  config.section(SECTION_HISTORICAL_SHARD_PATHS);
1162 
1163  auto values = historicalShardPaths.values();
1164 
1165  std::sort(values.begin(), values.end());
1166  values.erase(std::unique(values.begin(), values.end()), values.end());
1167 
1168  for (auto const& s : values)
1169  {
1170  auto const dir = path(s);
1171  if (dir_ == dir)
1172  {
1173  return fail(
1174  "the 'path' cannot also be in the "
1175  "'historical_shard_path' section");
1176  }
1177 
1178  historicalPaths_.push_back(dir);
1179  }
1180  }
1181 
1182  if (section.exists("ledgers_per_shard"))
1183  {
1184  // To be set only in standalone for testing
1185  if (!config.standalone())
1186  return fail("'ledgers_per_shard' only honored in stand alone");
1187 
1188  ledgersPerShard_ = get<std::uint32_t>(section, "ledgers_per_shard");
1189  if (ledgersPerShard_ == 0 || ledgersPerShard_ % 256 != 0)
1190  return fail("'ledgers_per_shard' must be a multiple of 256");
1191 
1194  }
1195 
1196  // NuDB is the default and only supported permanent storage backend
1197  backendName_ = get<std::string>(section, "type", "nudb");
1198  if (!boost::iequals(backendName_, "NuDB"))
1199  return fail("'type' value unsupported");
1200 
1201  return true;
1202 }
1203 
1204 std::shared_ptr<NodeObject>
1205 DatabaseShardImp::fetchNodeObject(
1206  uint256 const& hash,
1207  std::uint32_t ledgerSeq,
1208  FetchReport& fetchReport)
1209 {
1210  auto const shardIndex{seqToShardIndex(ledgerSeq)};
1211  std::shared_ptr<Shard> shard;
1212  {
1213  std::lock_guard lock(mutex_);
1214  auto const it{shards_.find(shardIndex)};
1215  if (it == shards_.end())
1216  return nullptr;
1217  shard = it->second;
1218  }
1219 
1220  return shard->fetchNodeObject(hash, fetchReport);
1221 }
1222 
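// Chooses the next shard index to acquire: for a small or mostly full index
// space, enumerate the available indexes and pick one at random; otherwise
// probe random indexes until a free one is found.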
1223 boost::optional<std::uint32_t>
1224 DatabaseShardImp::findAcquireIndex(
1225  std::uint32_t validLedgerSeq,
1226  std::lock_guard<std::mutex> const&)
1227 {
1228  if (validLedgerSeq < earliestLedgerSeq())
1229  return boost::none;
1230 
1231  auto const maxShardIndex{[this, validLedgerSeq]() {
1232  auto shardIndex{seqToShardIndex(validLedgerSeq)};
1233  if (validLedgerSeq != lastLedgerSeq(shardIndex))
1234  --shardIndex;
1235  return shardIndex;
1236  }()};
1237  auto const maxNumShards{maxShardIndex - earliestShardIndex() + 1};
1238 
1239  // Check if the shard store has all shards
1240  if (shards_.size() >= maxNumShards)
1241  return boost::none;
1242 
1243  if (maxShardIndex < 1024 ||
1244  static_cast<float>(shards_.size()) / maxNumShards > 0.5f)
1245  {
1246  // Small or mostly full index space to sample
1247  // Find the available indexes and select one at random
1248  std::vector<std::uint32_t> available;
1249  available.reserve(maxNumShards - shards_.size());
1250 
1251  for (auto shardIndex = earliestShardIndex();
1252  shardIndex <= maxShardIndex;
1253  ++shardIndex)
1254  {
1255  if (shards_.find(shardIndex) == shards_.end() &&
1256  preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
1257  {
1258  available.push_back(shardIndex);
1259  }
1260  }
1261 
1262  if (available.empty())
1263  return boost::none;
1264 
1265  if (available.size() == 1)
1266  return available.front();
1267 
1268  return available[rand_int(
1269  0u, static_cast<std::uint32_t>(available.size() - 1))];
1270  }
1271 
1272  // Large, sparse index space to sample
1273  // Keep choosing indexes at random until an available one is found;
1274  // the chance of needing more than 30 attempts is less than 1 in a billion
1275  for (int i = 0; i < 40; ++i)
1276  {
1277  auto const shardIndex{rand_int(earliestShardIndex(), maxShardIndex)};
1278  if (shards_.find(shardIndex) == shards_.end() &&
1279  preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
1280  {
1281  return shardIndex;
1282  }
1283  }
1284 
1285  assert(false);
1286  return boost::none;
1287 }
1288 
1289 void
1290 DatabaseShardImp::finalizeShard(
1291  std::shared_ptr<Shard>& shard,
1292  bool writeSQLite,
1293  boost::optional<uint256> const& expectedHash)
1294 {
1295  taskQueue_->addTask([this,
1296  wptr = std::weak_ptr<Shard>(shard),
1297  writeSQLite,
1298  expectedHash]() {
1299  if (isStopping())
1300  return;
1301 
1302  auto shard{wptr.lock()};
1303  if (!shard)
1304  {
1305  JLOG(j_.debug()) << "Shard removed before being finalized";
1306  return;
1307  }
1308 
1309  if (!shard->finalize(writeSQLite, expectedHash))
1310  {
1311  if (isStopping())
1312  return;
1313 
1314  // Invalid or corrupt shard, remove it
1315  removeFailedShard(shard);
1316  return;
1317  }
1318 
1319  if (isStopping())
1320  return;
1321 
1322  {
1323  auto const boundaryIndex{shardBoundaryIndex()};
1324 
1325  std::lock_guard lock(mutex_);
1326  updateStatus(lock);
1327 
1328  if (shard->index() < boundaryIndex)
1329  {
1330  // This is a historical shard
1331  if (!historicalPaths_.empty() &&
1332  shard->getDir().parent_path() == dir_)
1333  {
1334  // Shard wasn't placed at a separate historical path
1335  JLOG(j_.warn()) << "shard " << shard->index()
1336  << " is not stored at a historical path";
1337  }
1338  }
1339 
1340  else
1341  {
1342  // Not a historical shard. Shift recent shards if necessary
1343  relocateOutdatedShards(lock);
1344  assert(!boundaryIndex || shard->index() - boundaryIndex <= 1);
1345 
1346  auto& recentShard = shard->index() == boundaryIndex
1346  ? secondLatestShardIndex_
1347  : latestShardIndex_;
1349 
1350  // Set the appropriate recent shard index
1351  recentShard = shard->index();
1352 
1353  if (shard->getDir().parent_path() != dir_)
1354  {
1355  JLOG(j_.warn()) << "shard " << shard->index()
1356  << " is not stored at the path";
1357  }
1358  }
1359  }
1360 
1361  setFileStats();
1362 
1363  // Update peers with new shard index
1364  if (!app_.config().standalone() &&
1365  app_.getOPs().getOperatingMode() != OperatingMode::DISCONNECTED)
1366  {
1367  protocol::TMPeerShardInfo message;
1368  PublicKey const& publicKey{app_.nodeIdentity().first};
1369  message.set_nodepubkey(publicKey.data(), publicKey.size());
1370  message.set_shardindexes(std::to_string(shard->index()));
1371  app_.overlay().foreach(send_always(std::make_shared<Message>(
1372  message, protocol::mtPEER_SHARD_INFO)));
1373  }
1374  });
1375 }
1376 
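// Recomputes aggregate file size and file descriptor usage across all
// shards, and clears canAdd_ once the historical shard limit is reached or
// the remaining storage is insufficient.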
1377 void
1378 DatabaseShardImp::setFileStats()
1379 {
1380  std::vector<std::weak_ptr<Shard>> shards;
1381  {
1382  std::lock_guard lock(mutex_);
1383  if (shards_.empty())
1384  return;
1385 
1386  shards.reserve(shards_.size());
1387  for (auto const& e : shards_)
1388  shards.push_back(e.second);
1389  }
1390 
1391  std::uint64_t sumSz{0};
1392  std::uint32_t sumFd{0};
1393  std::uint32_t numShards{0};
1394  for (auto const& e : shards)
1395  {
1396  if (auto const shard{e.lock()}; shard)
1397  {
1398  auto const [sz, fd] = shard->getFileInfo();
1399  sumSz += sz;
1400  sumFd += fd;
1401  ++numShards;
1402  }
1403  }
1404 
1405  std::lock_guard lock(mutex_);
1406  fileSz_ = sumSz;
1407  fdRequired_ = sumFd;
1408  avgShardFileSz_ = (numShards == 0 ? fileSz_ : fileSz_ / numShards);
1409 
1410  if (!canAdd_)
1411  return;
1412 
1413  if (auto const count = numHistoricalShards(lock);
1414  count >= maxHistoricalShards_)
1415  {
1416  if (maxHistoricalShards_)
1417  {
1418  // In order to avoid excessive output, don't produce
1419  // this warning if the server isn't configured to
1420  // store historical shards.
1421  JLOG(j_.warn()) << "maximum number of historical shards reached";
1422  }
1423 
1424  canAdd_ = false;
1425  }
1426  else if (!sufficientStorage(
1427  maxHistoricalShards_ - count,
1428  PathDesignation::historical,
1429  lock))
1430  {
1431  JLOG(j_.warn())
1432  << "maximum shard store size exceeds available storage space";
1433 
1434  canAdd_ = false;
1435  }
1436 }
1437 
1438 void
1439 DatabaseShardImp::updateStatus(std::lock_guard<std::mutex> const&)
1440 {
1441  if (!shards_.empty())
1442  {
1443  RangeSet<std::uint32_t> rs;
1444  for (auto const& e : shards_)
1445  if (e.second->getState() == Shard::final)
1446  rs.insert(e.second->index());
1447  status_ = to_string(rs);
1448  }
1449  else
1450  status_.clear();
1451 }
1452 
1453 bool
1454 DatabaseShardImp::sufficientStorage(
1455  std::uint32_t numShards,
1456  PathDesignation pathDesignation,
1457  std::lock_guard<std::mutex> const&) const
1458 {
1459  try
1460  {
1461  std::vector<std::uint64_t> capacities;
1462 
1463  if (pathDesignation == PathDesignation::historical &&
1464  !historicalPaths_.empty())
1465  {
1466  capacities.reserve(historicalPaths_.size());
1467 
1468  for (auto const& path : historicalPaths_)
1469  {
1470  // Get the available storage for each historical path
1471  auto const availableSpace =
1472  boost::filesystem::space(path).available;
1473 
1474  capacities.push_back(availableSpace);
1475  }
1476  }
1477  else
1478  {
1479  // Get the available storage for the main shard path
1480  capacities.push_back(boost::filesystem::space(dir_).available);
1481  }
1482 
1483  for (std::uint64_t const capacity : capacities)
1484  {
1485  // Leverage all the historical shard paths to
1486  // see if collectively they can fit the specified
1487  // number of shards. For this to work properly,
1488  // each historical path must correspond to a separate
1489  // physical device or filesystem.
1490 
1491  auto const shardCap = capacity / avgShardFileSz_;
1492  if (numShards <= shardCap)
1493  return true;
1494 
1495  numShards -= shardCap;
1496  }
1497  }
1498  catch (std::exception const& e)
1499  {
1500  JLOG(j_.fatal()) << "Exception caught in function " << __func__
1501  << ". Error: " << e.what();
1502  return false;
1503  }
1504 
1505  return false;
1506 }
1507 
1508 bool
1509 DatabaseShardImp::setStoredInShard(
1510  std::shared_ptr<Shard>& shard,
1511  std::shared_ptr<Ledger const> const& ledger)
1512 {
1513  if (!shard->setLedgerStored(ledger))
1514  {
1515  // Invalid or corrupt shard, remove it
1516  removeFailedShard(shard);
1517  return false;
1518  }
1519 
1520  if (shard->getState() == Shard::complete)
1521  {
1522  std::lock_guard lock(mutex_);
1523  if (auto const it{shards_.find(shard->index())}; it != shards_.end())
1524  {
1525  if (shard->index() == acquireIndex_)
1526  acquireIndex_ = 0;
1527 
1528  finalizeShard(it->second, false, boost::none);
1529  }
1530  else
1531  {
1532  JLOG(j_.debug())
1533  << "shard " << shard->index() << " is no longer being acquired";
1534  }
1535  }
1536 
1537  setFileStats();
1538  return true;
1539 }
1540 
1541 void
1542 DatabaseShardImp::removeFailedShard(std::shared_ptr<Shard>& shard)
1543 {
1544  {
1545  std::lock_guard lock(mutex_);
1546 
1547  if (shard->index() == acquireIndex_)
1548  acquireIndex_ = 0;
1549 
1550  if (shard->index() == latestShardIndex_)
1551  latestShardIndex_ = boost::none;
1552 
1553  if (shard->index() == secondLatestShardIndex_)
1554  secondLatestShardIndex_ = boost::none;
1555 
1556  if ((shards_.erase(shard->index()) > 0) &&
1557  shard->getState() == Shard::final)
1558  {
1559  updateStatus(lock);
1560  }
1561  }
1562 
1563  shard->removeOnDestroy();
1564 
1565  // Reset the shared_ptr to invoke the shard's
1566  // destructor and remove it from the server
1567  shard.reset();
1568  setFileStats();
1569 }
1570 
1571 std::uint32_t
1572 DatabaseShardImp::shardBoundaryIndex() const
1573 {
1574  auto const validIndex = app_.getLedgerMaster().getValidLedgerIndex();
1575 
1576  if (validIndex < earliestLedgerSeq())
1577  return 0;
1578 
1579  // Shards with an index earlier than the recent shard boundary index
1580  // are considered historical. The three shards at or later than
1581  // this index consist of the two most recently validated shards
1582  // and the shard still in the process of being built by live
1583  // transactions.
1584  return seqToShardIndex(validIndex) - 1;
1585 }
1586 
1587 std::uint32_t
1588 DatabaseShardImp::numHistoricalShards(
1589  std::lock_guard<std::mutex> const& lock) const
1590 {
1591  auto const boundaryIndex{shardBoundaryIndex()};
1592  return std::count_if(
1593  shards_.begin(), shards_.end(), [boundaryIndex](auto const& entry) {
1594  return entry.first < boundaryIndex;
1595  });
1596 }
1597 
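// Invoked as new shards complete: shards that are no longer one of the two
// most recent are shifted from the main path to a historical path, subject
// to the historical shard count and storage checks.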
1598 void
1599 DatabaseShardImp::relocateOutdatedShards(
1600  std::lock_guard<std::mutex> const& lock)
1601 {
1602  if (auto& cur = latestShardIndex_, &prev = secondLatestShardIndex_;
1603  cur || prev)
1604  {
1605  auto const latestShardIndex =
1606  seqToShardIndex(app_.getLedgerMaster().getValidLedgerIndex());
1607 
1608  auto const separateHistoricalPath = !historicalPaths_.empty();
1609 
1610  auto const removeShard =
1611  [this](std::uint32_t const shardIndex) -> void {
1612  canAdd_ = false;
1613 
1614  if (auto it = shards_.find(shardIndex); it != shards_.end())
1615  {
1616  if (it->second)
1617  removeFailedShard(it->second);
1618  else
1619  {
1620  JLOG(j_.warn()) << "can't find shard to remove";
1621  }
1622  }
1623  else
1624  {
1625  JLOG(j_.warn()) << "can't find shard to remove";
1626  }
1627  };
1628 
1629  auto const keepShard =
1630  [this, &lock, removeShard, separateHistoricalPath](
1631  std::uint32_t const shardIndex) -> bool {
1632  if (numHistoricalShards(lock) >= maxHistoricalShards_)
1633  {
1634  JLOG(j_.error())
1635  << "maximum number of historical shards reached";
1636 
1637  removeShard(shardIndex);
1638  return false;
1639  }
1640  if (separateHistoricalPath &&
1641  !sufficientStorage(1, PathDesignation::historical, lock))
1642  {
1643  JLOG(j_.error()) << "insufficient storage space available";
1644 
1645  removeShard(shardIndex);
1646  return false;
1647  }
1648 
1649  return true;
1650  };
1651 
1652  // Move a shard from the main shard path to a historical shard
1653  // path by copying the contents, and creating a new shard.
1654  auto const moveShard = [this,
1655  &lock](std::uint32_t const shardIndex) -> void {
1656  auto const dst = chooseHistoricalPath(lock);
1657 
1658  if (auto it = shards_.find(shardIndex); it != shards_.end())
1659  {
1660  auto& shard{it->second};
1661 
1662  // Close any open file descriptors before moving the shard
1663  // directory. Don't call removeOnDestroy since that would
1664  // attempt to close the fds after the directory has been moved.
1665  if (!shard->tryClose())
1666  {
1667  JLOG(j_.warn())
1668  << "can't close shard to move to historical path";
1669  return;
1670  }
1671 
1672  try
1673  {
1674  // Move the shard directory to the new path
1675  boost::filesystem::rename(
1676  shard->getDir().string(),
1677  dst / std::to_string(shardIndex));
1678  }
1679  catch (...)
1680  {
1681  JLOG(j_.error()) << "shard " << shardIndex
1682  << " failed to move to historical storage";
1683  return;
1684  }
1685 
1686  // Create a shard instance at the new location
1687  shard =
1688  std::make_shared<Shard>(app_, *this, shardIndex, dst, j_);
1689 
1690  // Open the new shard
1691  if (!shard->init(scheduler_, *ctx_))
1692  {
1693  JLOG(j_.error()) << "shard " << shardIndex
1694  << " failed to open in historical storage";
1695  shard->removeOnDestroy();
1696  shard.reset();
1697  }
1698  }
1699  else
1700  {
1701  JLOG(j_.warn())
1702  << "can't find shard to move to historical path";
1703  }
1704  };
1705 
1706  // See if either of the recent shards needs to be updated
1707  bool const curNotSynched =
1708  latestShardIndex_ && *latestShardIndex_ != latestShardIndex;
1709  bool const prevNotSynched = secondLatestShardIndex_ &&
1710  *secondLatestShardIndex_ != latestShardIndex - 1;
1711 
1712  // A new shard has been published. Move outdated
1713  // shards to historical storage as needed
1714  if (curNotSynched || prevNotSynched)
1715  {
1716  if (prev)
1717  {
1718  // Move the formerly second latest shard to historical storage
1719  if (keepShard(*prev) && separateHistoricalPath)
1720  {
1721  moveShard(*prev);
1722  }
1723 
1724  prev = boost::none;
1725  }
1726 
1727  if (cur)
1728  {
1729  // The formerly latest shard is now the second latest
1730  if (cur == latestShardIndex - 1)
1731  {
1732  prev = cur;
1733  }
1734 
1735  // The formerly latest shard is no longer a 'recent' shard
1736  else
1737  {
1738  // Move the formerly latest shard to historical storage
1739  if (keepShard(*cur) && separateHistoricalPath)
1740  {
1741  moveShard(*cur);
1742  }
1743  }
1744 
1745  cur = boost::none;
1746  }
1747  }
1748  }
1749 }
1750 
1751 auto
1752 DatabaseShardImp::prepareForNewShard(
1753  std::uint32_t shardIndex,
1754  std::uint32_t numHistoricalShards,
1755  std::lock_guard<std::mutex> const& lock) -> boost::optional<PathDesignation>
1756 {
1757  // Any shard earlier than the two most recent shards is a historical shard
1758  auto const boundaryIndex{shardBoundaryIndex()};
1759  auto const isHistoricalShard = shardIndex < boundaryIndex;
1760 
1761  auto const designation = isHistoricalShard && !historicalPaths_.empty()
1762  ? PathDesignation::historical
1763  : PathDesignation::none;
1764 
1765  // Check shard count and available storage space
1766  if (isHistoricalShard && numHistoricalShards >= maxHistoricalShards_)
1767  {
1768  JLOG(j_.error()) << "maximum number of historical shards reached";
1769  canAdd_ = false;
1770  return boost::none;
1771  }
1772  if (!sufficientStorage(1, designation, lock))
1773  {
1774  JLOG(j_.error()) << "insufficient storage space available";
1775  canAdd_ = false;
1776  return boost::none;
1777  }
1778 
1779  return designation;
1780 }
1781 
1782 boost::filesystem::path
1783 DatabaseShardImp::chooseHistoricalPath(std::lock_guard<std::mutex> const&) const
1784 {
1785  // If not configured with separate historical paths,
1786  // use the main path (dir_) by default.
1787  if (historicalPaths_.empty())
1788  return dir_;
1789 
1790  boost::filesystem::path historicalShardPath;
1791  std::vector<boost::filesystem::path> potentialPaths;
1792 
1793  for (boost::filesystem::path const& path : historicalPaths_)
1794  {
1795  if (boost::filesystem::space(path).available >= avgShardFileSz_)
1796  potentialPaths.push_back(path);
1797  }
1798 
1799  if (potentialPaths.empty())
1800  {
1801  JLOG(j_.error()) << "failed to select a historical shard path";
1802  return "";
1803  }
1804 
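 // Pick one of the eligible paths uniformly at random.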
1805  std::sample(
1806  potentialPaths.begin(),
1807  potentialPaths.end(),
1808  &historicalShardPath,
1809  1,
1810  default_prng());
1811 
1812  return historicalShardPath;
1813 }
1814 
1815 bool
1816 DatabaseShardImp::checkHistoricalPaths() const
1817 {
1818 #if BOOST_OS_LINUX
1819  // Each historical shard path must correspond
1820  // to a directory on a distinct device or file system.
1821  // Currently, this constraint is enforced only on Linux.
1822  std::unordered_map<decltype(statvfs::f_fsid), std::vector<std::string>>
1823  filesystemIDs;
1824 
1825  for (auto const& path : historicalPaths_)
1826  {
1827  struct statvfs buffer;
1828  if (statvfs(path.c_str(), &buffer))
1829  {
1830  JLOG(j_.error())
1831  << "failed to acquire stats for 'historical_shard_path': "
1832  << path;
1833  return false;
1834  }
1835 
1836  filesystemIDs[buffer.f_fsid].push_back(path.string());
1837  }
1838 
1839  bool ret = true;
1840  for (auto const& entry : filesystemIDs)
1841  {
1842  // Check to see if any of the paths are stored on the same file system
1843  if (entry.second.size() > 1)
1844  {
1845  // Two or more historical storage paths
1846  // correspond to the same file system.
1847  JLOG(j_.error())
1848  << "The following paths correspond to the same filesystem: "
1849  << boost::algorithm::join(entry.second, ", ")
1850  << ". Each configured historical storage path should"
1851  " be on a unique device or filesystem.";
1852 
1853  ret = false;
1854  }
1855  }
1856 
1857  return ret;
1858 
1859 #else
1860  // The requirement that each historical storage path
1861  // corresponds to a distinct device or file system is
1862  // enforced only on Linux, so on other platforms
1863  // keep track of the available capacities for each
1864  // path. Issue a warning if we suspect any of the paths
1865  // may violate this requirement.
1866 
1867  // Map byte counts to each path that shares that byte count.
1868  std::unordered_map<std::uintmax_t, std::vector<std::string>>
1869  uniqueCapacities(historicalPaths_.size());
1870 
1871  for (auto const& path : historicalPaths_)
1872  uniqueCapacities[boost::filesystem::space(path).available].push_back(
1873  path.string());
1874 
1875  for (auto const& entry : uniqueCapacities)
1876  {
1877  // Check to see if any paths have the same amount of available bytes.
1878  if (entry.second.size() > 1)
1879  {
1880  // Two or more historical storage paths may
1881  // correspond to the same device or file system.
1882  JLOG(j_.warn())
1883  << "Each of the following paths have " << entry.first
1884  << " bytes free, and may be located on the same device"
1885  " or file system: "
1886  << boost::algorithm::join(entry.second, ", ")
1887  << ". Each configured historical storage path should"
1888  " be on a unique device or file system.";
1889  }
1890  }
1891 #endif
1892 
1893  return true;
1894 }
1895 
1896 //------------------------------------------------------------------------------
1897 
1898 std::unique_ptr<DatabaseShard>
1899 make_ShardStore(
1900  Application& app,
1901  Stoppable& parent,
1902  Scheduler& scheduler,
1903  int readThreads,
1904  beast::Journal j)
1905 {
1906  // The shard store is optional. Future changes will require it.
1907  Section const& section{
1908  app.config().section(ConfigSection::shardDatabase())};
1909  if (section.empty())
1910  return nullptr;
1911 
1912  return std::make_unique<DatabaseShardImp>(
1913  app, parent, "ShardStore", scheduler, readThreads, j);
1914 }
1915 
1916 } // namespace NodeStore
1917 } // namespace ripple
ripple::NodeStore::make_ShardStore
std::unique_ptr< DatabaseShard > make_ShardStore(Application &app, Stoppable &parent, Scheduler &scheduler, int readThreads, beast::Journal j)
Definition: DatabaseShardImp.cpp:1899
beast::Journal::fatal
Stream fatal() const
Definition: Journal.h:339
ripple::SizedItem::openFinalLimit
@ openFinalLimit
ripple::Section
Holds a collection of configuration values.
Definition: BasicConfig.h:43
ripple::loadLedgerHelper
std::tuple< std::shared_ptr< Ledger >, std::uint32_t, uint256 > loadLedgerHelper(std::string const &sqlSuffix, Application &app, bool acquire)
Definition: Ledger.cpp:1196
ripple::Application
Definition: Application.h:101
ripple::NodeStore::DatabaseShardImp::earliestShardIndex_
std::uint32_t earliestShardIndex_
Definition: DatabaseShardImp.h:220
ripple::NodeStore::DatabaseShardImp::ledgersPerShard_
std::uint32_t ledgersPerShard_
Definition: DatabaseShardImp.h:217
ripple::hotUNKNOWN
@ hotUNKNOWN
Definition: NodeObject.h:33
ripple::NodeStore::DatabaseShardImp::lastLedgerSeq
std::uint32_t lastLedgerSeq(std::uint32_t shardIndex) const override
Calculates the last ledger sequence for a given shard index.
Definition: DatabaseShardImp.h:108
ripple::NodeStore::DatabaseShardImp::earliestShardIndex
std::uint32_t earliestShardIndex() const override
Definition: DatabaseShardImp.h:86
ripple::makeSlice
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:240
ripple::NodeStore::DatabaseShardImp::mutex_
std::mutex mutex_
Definition: DatabaseShardImp.h:175
ripple::NodeStore::DatabaseShardImp::prepareLedger
boost::optional< std::uint32_t > prepareLedger(std::uint32_t validLedgerSeq) override
Prepare to store a new ledger in the shard being acquired.
Definition: DatabaseShardImp.cpp:234
ripple::NodeStore::DatabaseShardImp::app_
Application & app_
Definition: DatabaseShardImp.h:173
ripple::NodeStore::DatabaseShardImp::storeLedger
bool storeLedger(std::shared_ptr< Ledger const > const &srcLedger) override
Store a ledger from a different database.
Definition: DatabaseShardImp.cpp:1028
ripple::NodeStore::Database
Persistency layer for NodeObject.
Definition: Database.h:52
ripple::NodeStore::Shard::acquire
static constexpr State acquire
Definition: Shard.h:60
std::string
STL class.
std::shared_ptr< Ledger >
ripple::loadByIndex
std::shared_ptr< Ledger > loadByIndex(std::uint32_t ledgerIndex, Application &app, bool acquire)
Definition: Ledger.cpp:1565
ripple::SizedItem
SizedItem
Definition: Config.h:48
std::exception
STL class.
std::stoul
T stoul(T... args)
ripple::Stoppable::stopped
void stopped()
Called by derived classes to indicate that the stoppable has stopped.
Definition: Stoppable.cpp:72
ripple::NodeStore::DatabaseShardImp::PathDesignation
PathDesignation
Definition: DatabaseShardImp.h:168
beast::Journal::trace
Stream trace() const
Severity stream access functions.
Definition: Journal.h:309
ripple::Serializer::modData
Blob & modData()
Definition: Serializer.h:176
std::vector::reserve
T reserve(T... args)
ripple::NodeStore::DatabaseShardImp::removePreShard
void removePreShard(std::uint32_t shardIndex) override
Remove a previously prepared shard index for import.
Definition: DatabaseShardImp.cpp:411
ripple::LedgerMaster::getValidLedgerIndex
LedgerIndex getValidLedgerIndex()
Definition: LedgerMaster.cpp:215
ripple::NodeStore::Database::fdRequired_
int fdRequired_
Definition: Database.h:245
ripple::NodeStore::DatabaseShardImp::fileSz_
std::uint64_t fileSz_
Definition: DatabaseShardImp.h:212
ripple::InboundLedger::Reason::GENERIC
@ GENERIC
std::vector
STL class.
std::set::find
T find(T... args)
ripple::ConfigSection::shardDatabase
static std::string shardDatabase()
Definition: ConfigSections.h:38
std::vector::size
T size(T... args)
ripple::NodeObjectType
NodeObjectType
The types of node objects.
Definition: NodeObject.h:32
ripple::CashFilter::none
@ none
ripple::NodeObject::createObject
static std::shared_ptr< NodeObject > createObject(NodeObjectType type, Blob &&data, uint256 const &hash)
Create an object from fields.
Definition: NodeObject.cpp:37
ripple::NodeStore::Database::getBackend
virtual Backend & getBackend()=0
ripple::NodeStore::DatabaseShardImp::taskQueue_
std::unique_ptr< TaskQueue > taskQueue_
Definition: DatabaseShardImp.h:182
ripple::NodeStore::DatabaseShardImp::setStored
void setStored(std::shared_ptr< Ledger const > const &ledger) override
Notifies the database that the given ledger has been fully acquired and stored.
Definition: DatabaseShardImp.cpp:628
ripple::NodeStore::DatabaseShardImp::updateStatus
void updateStatus(std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:1439
ripple::NodeStore::DatabaseShardImp::secondLatestShardIndex_
boost::optional< std::uint32_t > secondLatestShardIndex_
Definition: DatabaseShardImp.h:239
std::set::emplace
T emplace(T... args)
beast::Journal::warn
Stream warn() const
Definition: Journal.h:327
std::lock_guard
STL class.
ripple::kilobytes
constexpr auto kilobytes(T value) noexcept
Definition: ByteUtilities.h:27
ripple::NetworkOPs::getOperatingMode
virtual OperatingMode getOperatingMode() const =0
ripple::NodeStore::FetchReport
Contains information about a fetch operation.
Definition: ripple/nodestore/Scheduler.h:32
ripple::LedgerMaster::walkHashBySeq
boost::optional< LedgerHash > walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason)
Walk to a ledger's hash using the skip list.
Definition: LedgerMaster.cpp:1676
ripple::NodeStore::DatabaseShardImp::finalizeShard
void finalizeShard(std::shared_ptr< Shard > &shard, bool writeSQLite, boost::optional< uint256 > const &expectedHash)
Definition: DatabaseShardImp.cpp:1290
boost
Definition: IPAddress.h:117
std::all_of
T all_of(T... args)
ripple::to_string
std::string to_string(ListDisposition disposition)
Definition: ValidatorList.cpp:45
ripple::NodeStore::Shard::finalKey
static const uint256 finalKey
Definition: Shard.h:205
ripple::NodeStore::Shard::complete
static constexpr State complete
Definition: Shard.h:61
ripple::deserializePrefixedHeader
LedgerInfo deserializePrefixedHeader(Slice data, bool hasHash)
Deserialize a ledger header (prefixed with 4 bytes) from a byte array.
Definition: InboundLedger.cpp:301
ripple::NodeStore::DatabaseShardImp::openFinalLimit_
const std::uint32_t openFinalLimit_
Definition: DatabaseShardImp.h:226
std::sort
T sort(T... args)
std::shared_ptr::reset
T reset(T... args)
ripple::SHAMapHash
Definition: SHAMapTreeNode.h:47
ripple::Application::getOPs
virtual NetworkOPs & getOPs()=0
ripple::NodeStore::DatabaseShardImp::sweep
void sweep() override
Remove expired entries from the positive and negative caches.
Definition: DatabaseShardImp.cpp:1063
ripple::Section::values
std::vector< std::string > const & values() const
Returns all the values in the section.
Definition: BasicConfig.h:76
std::string::clear
T clear(T... args)
ripple::send_always
Sends a message to all peers.
Definition: predicates.h:31
ripple::Stoppable::setParent
void setParent(Stoppable &parent)
Set the parent of this Stoppable.
Definition: Stoppable.cpp:43
ripple::NodeStore::DatabaseShardImp::PathDesignation::historical
@ historical
ripple::get_if_exists
bool get_if_exists(Section const &section, std::string const &name, T &v)
Definition: BasicConfig.h:347
ripple::NodeStore::Shard::version
static constexpr std::uint32_t version
Definition: Shard.h:200
std::tie
T tie(T... args)
std::vector::push_back
T push_back(T... args)
ripple::NodeStore::DatabaseShardImp::firstLedgerSeq
std::uint32_t firstLedgerSeq(std::uint32_t shardIndex) const override
Calculates the first ledger sequence for a given shard index.
Definition: DatabaseShardImp.h:99
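Assuming 1-based ledger numbering and the default 16384-ledger shard width, the mapping is plain arithmetic; firstSeqOf below is a hypothetical stand-in for the member (the real member also clamps the earliest shard to earliestLedgerSeq()):
    constexpr std::uint32_t ledgersPerShard = 16384;
    constexpr std::uint32_t firstSeqOf(std::uint32_t shardIndex)
    {
        return 1 + shardIndex * ledgersPerShard;
    }
    static_assert(firstSeqOf(1) == 16385);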
ripple::NodeStore::DatabaseShardImp::fetchNodeObject
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq, FetchReport &fetchReport) override
Definition: DatabaseShardImp.cpp:1205
ripple::NodeStore::DatabaseShardImp::avgShardFileSz_
std::uint64_t avgShardFileSz_
Definition: DatabaseShardImp.h:223
ripple::base_uint< 256 >
ripple::NodeStore::DatabaseShardImp::status_
std::string status_
Definition: DatabaseShardImp.h:200
std::sample
T sample(T... args)
ripple::NodeStore::DatabaseShardImp::getPreShards
std::string getPreShards() override
Get shard indexes being imported.
Definition: DatabaseShardImp.cpp:420
ripple::NodeStore::DatabaseShardImp::getWriteLoad
std::int32_t getWriteLoad() const override
Retrieve the estimated number of pending write operations.
Definition: DatabaseShardImp.cpp:977
ripple::NodeStore::TaskQueue
Definition: TaskQueue.h:32
ripple::Config::reporting
bool reporting() const
Definition: Config.h:267
ripple::OperatingMode::DISCONNECTED
@ DISCONNECTED
Not ready to process requests.
ripple::Stoppable
Provides an interface for starting and stopping.
Definition: Stoppable.h:201
ripple::NodeStore::DatabaseShardImp::chooseHistoricalPath
boost::filesystem::path chooseHistoricalPath(std::lock_guard< std::mutex > const &) const
Definition: DatabaseShardImp.cpp:1783
ripple::rand_int
std::enable_if_t< std::is_integral< Integral >::value &&detail::is_engine< Engine >::value, Integral > rand_int(Engine &engine, Integral min, Integral max)
Return a uniformly distributed random integer.
Definition: ripple/basics/random.h:115
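This pairs naturally with default_prng() (documented later in this index); both bounds are inclusive, so picking a vector index looks like this sketch:
    // Sketch: uniform pick over a non-empty candidate vector.
    std::vector<std::uint32_t> candidates{7, 11, 13};
    auto const pick = candidates[rand_int(
        default_prng(), std::size_t{0}, candidates.size() - 1)];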
ripple::NodeStore::DatabaseShardImp::sufficientStorage
bool sufficientStorage(std::uint32_t numShards, PathDesignation pathDesignation, std::lock_guard< std::mutex > const &) const
Definition: DatabaseShardImp.cpp:1454
ripple::NodeStore::DatabaseShardImp::init_
bool init_
Definition: DatabaseShardImp.h:176
ripple::NodeStore::DatabaseShardImp::getCompleteShards
std::string getCompleteShards() override
Query which complete shards are stored.
Definition: DatabaseShardImp.cpp:690
ripple::NodeStore::DatabaseShardImp::seqToShardIndex
std::uint32_t seqToShardIndex(std::uint32_t ledgerSeq) const override
Calculates the shard index for a given ledger sequence.
Definition: DatabaseShardImp.h:92
ripple::Application::getLedgerMaster
virtual LedgerMaster & getLedgerMaster()=0
ripple::PublicKey
A public key.
Definition: PublicKey.h:59
ripple::Config
Definition: Config.h:67
std::ofstream
STL class.
ripple::Application::config
virtual Config & config()=0
ripple::NodeStore::DatabaseShardImp::dir_
boost::filesystem::path dir_
Definition: DatabaseShardImp.h:194
ripple::Config::standalone
bool standalone() const
Definition: Config.h:262
ripple::NodeStore::DatabaseShardImp::removeFailedShard
void removeFailedShard(std::shared_ptr< Shard > &shard)
Definition: DatabaseShardImp.cpp:1542
ripple::Application::nodeIdentity
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
ripple::NodeStore::DatabaseShard
A collection of historical shards.
Definition: DatabaseShard.h:37
std::to_string
T to_string(T... args)
ripple::NodeStore::DatabaseShardImp::importShard
bool importShard(std::uint32_t shardIndex, boost::filesystem::path const &srcDir) override
Import a shard into the shard database.
Definition: DatabaseShardImp.cpp:438
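A hypothetical admin flow: the index is first reserved via prepareShards (documented further down in this index) before the copied shard directory is handed over; the path is a placeholder:
    // Sketch: both calls return false when the request is rejected.
    if (shardStore.prepareShards({shardIndex}))
    {
        if (!shardStore.importShard(shardIndex, "/tmp/shard.copy"))
        {
            // import was rejected; the reservation can be abandoned
        }
    }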
ripple::default_prng
beast::xor_shift_engine & default_prng()
Return the default random engine.
Definition: ripple/basics/random.h:65
ripple::NodeStore::DatabaseShardImp::store
void store(NodeObjectType type, Blob &&data, uint256 const &hash, std::uint32_t ledgerSeq) override
Store the object.
Definition: DatabaseShardImp.cpp:994
ripple::NodeStore::DatabaseShardImp::PathDesignation::none
@ none
beast::Journal::error
Stream error() const
Definition: Journal.h:333
ripple::NodeStore::DatabaseShardImp::checkHistoricalPaths
bool checkHistoricalPaths() const
Definition: DatabaseShardImp.cpp:1816
std::set::erase
T erase(T... args)
ripple::NodeStore::DatabaseShardImp::initConfig
bool initConfig(std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:1117
ripple::ConfigSection
Definition: ConfigSections.h:28
ripple::NodeStore::DatabaseShardImp::latestShardIndex_
boost::optional< std::uint32_t > latestShardIndex_
Definition: DatabaseShardImp.h:238
beast::Journal
A generic endpoint for log messages.
Definition: Journal.h:58
ripple::NodeStore::seqToShardIndex
constexpr std::uint32_t seqToShardIndex(std::uint32_t ledgerSeq, std::uint32_t ledgersPerShard=DatabaseShard::ledgersPerShardDefault)
Definition: DatabaseShard.h:183
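Because this form is constexpr, the mapping can be checked at compile time; these values assume the default 16384-ledger shard width and the (ledgerSeq - 1) / ledgersPerShard convention:
    static_assert(seqToShardIndex(1) == 0);
    static_assert(seqToShardIndex(16384) == 0);
    static_assert(seqToShardIndex(16385) == 1);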
std::uint32_t
ripple::NodeStore::Database::earliestLedgerSeq
std::uint32_t earliestLedgerSeq() const
Definition: Database.h:237
ripple::NodeStore::DatabaseShardImp::setFileStats
void setFileStats()
Definition: DatabaseShardImp.cpp:1378
ripple::NodeStore::DatabaseShardImp::acquireIndex_
std::uint32_t acquireIndex_
Definition: DatabaseShardImp.h:191
ripple::Overlay::foreach
void foreach(Function f) const
Visit every active peer.
Definition: Overlay.h:178
ripple::NodeStore::Scheduler
Scheduling for asynchronous backend activity.
Definition: ripple/nodestore/Scheduler.h:60
ripple::NodeStore::Database::fetchNodeObject
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq=0, FetchType fetchType=FetchType::synchronous)
Fetch a node object.
Definition: Database.cpp:145
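A store/fetch round trip against any Database (see the store entry above), sketched with placeholder data, hash, and sequence:
    // Sketch: store an object, then synchronously fetch it back.
    db.store(hotLEDGER, std::move(data), hash, ledgerSeq);
    if (auto const obj = db.fetchNodeObject(hash, ledgerSeq))
    {
        // obj->getData() holds the blob that was stored.
    }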
std::transform
T transform(T... args)
ripple::NodeStore::Database::storeStats
void storeStats(std::uint64_t count, std::uint64_t sz)
Definition: Database.h:254
ripple::NodeStore::DatabaseShardImp::preparedIndexes_
std::set< std::uint32_t > preparedIndexes_
Definition: DatabaseShardImp.h:188
ripple::NodeStore::DatabaseShardImp::init
bool init() override
Initialize the database.
Definition: DatabaseShardImp.cpp:73
ripple::NodeStore::Shard::final
static constexpr State final
Definition: Shard.h:63
std::weak_ptr
STL class.
ripple::Serializer
Definition: Serializer.h:39
ripple::NodeStore::Database::stopReadThreads
void stopReadThreads()
Definition: Database.cpp:78
ripple::NodeStore::DatabaseShardImp::historicalPaths_
std::vector< boost::filesystem::path > historicalPaths_
Definition: DatabaseShardImp.h:209
ripple::NodeStore::DatabaseShardImp::parent_
Stoppable & parent_
Definition: DatabaseShardImp.h:174
ripple::NodeStore::DatabaseShardImp::findAcquireIndex
boost::optional< std::uint32_t > findAcquireIndex(std::uint32_t validLedgerSeq, std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:1224
ripple::NodeStore::DatabaseShardImp::importMarker_
static constexpr auto importMarker_
Definition: DatabaseShardImp.h:229
std::vector::emplace_back
T emplace_back(T... args)
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::Serializer::addBitString
int addBitString(base_uint< Bits, Tag > const &v)
Definition: Serializer.h:97
ripple::Application::getNodeStore
virtual NodeStore::Database & getNodeStore()=0
ripple::NodeStore::DatabaseShardImp::import
void import(Database &source) override
Import the application's local node store.
Definition: DatabaseShardImp.cpp:741
ripple::NodeStore::DatabaseShardImp::maxHistoricalShards_
std::uint32_t maxHistoricalShards_
Definition: DatabaseShardImp.h:206
ripple::Application::getShardFamily
virtual Family * getShardFamily()=0
ripple::NodeStore::Database::j_
const beast::Journal j_
Definition: Database.h:243
ripple::NodeStore::DatabaseShardImp::fetchLedger
std::shared_ptr< Ledger > fetchLedger(uint256 const &hash, std::uint32_t ledgerSeq) override
Fetch a ledger from the shard store.
Definition: DatabaseShardImp.cpp:550
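A brief sketch, assuming the shard store needs both hash and sequence to locate the ledger and returns an empty pointer when the containing shard is unavailable:
    if (auto const ledger = shardStore.fetchLedger(ledgerHash, ledgerSeq))
    {
        // ledger->info().seq == ledgerSeq for a successful fetch.
    }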
std::vector::begin
T begin(T... args)
std
STL namespace.
ripple::NodeStore::DatabaseShardImp::numHistoricalShards
std::uint32_t numHistoricalShards(std::lock_guard< std::mutex > const &lock) const
Definition: DatabaseShardImp.cpp:1588
ripple::LedgerMaster::getCurrentLedgerIndex
LedgerIndex getCurrentLedgerIndex()
Definition: LedgerMaster.cpp:209
ripple::NodeStore::DatabaseShardImp::relocateOutdatedShards
void relocateOutdatedShards(std::lock_guard< std::mutex > const &lock)
Definition: DatabaseShardImp.cpp:1599
ripple::NodeStore::DatabaseShardImp::onChildrenStopped
void onChildrenStopped() override
Override called when all children have stopped.
Definition: DatabaseShardImp.cpp:712
ripple::Application::overlay
virtual Overlay & overlay()=0
ripple::NodeStore::DatabaseShardImp::shardBoundaryIndex
std::uint32_t shardBoundaryIndex() const
Definition: DatabaseShardImp.cpp:1572
std::count_if
T count_if(T... args)
std::vector::empty
T empty(T... args)
ripple::NodeStore::DatabaseShardImp::prepareShards
bool prepareShards(std::vector< std::uint32_t > const &shardIndexes) override
Prepare one or more shard indexes to be imported into the database.
Definition: DatabaseShardImp.cpp:302
std::unique
T unique(T... args)
beast::Journal::debug
Stream debug() const
Definition: Journal.h:315
ripple::getHashesByIndex
bool getHashesByIndex(std::uint32_t ledgerIndex, uint256 &ledgerHash, uint256 &parentHash, Application &app)
Definition: Ledger.cpp:1633
ripple::NodeStore::DatabaseShardImp::setStoredInShard
bool setStoredInShard(std::shared_ptr< Shard > &shard, std::shared_ptr< Ledger const > const &ledger)
Definition: DatabaseShardImp.cpp:1509
ripple::NodeStore::DatabaseShardImp::canAdd_
bool canAdd_
Definition: DatabaseShardImp.h:197
ripple::Serializer::add32
int add32(std::uint32_t i)
Definition: Serializer.cpp:38
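Together with addBitString above, this is the usual pattern for composing small fixed-layout records; the fields below are placeholders, not a documented format:
    // Sketch: serialize a version, a sequence range, and a hash.
    Serializer s;
    s.add32(version);
    s.add32(firstSeq);
    s.add32(lastSeq);
    s.addBitString(lastHash);  // lastHash: uint256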
std::vector::end
T end(T... args)
ripple::NodeStore::Database::scheduler_
Scheduler & scheduler_
Definition: Database.h:244
ripple::RangeSet
boost::icl::interval_set< T, std::less, ClosedInterval< T > > RangeSet
A set of closed intervals over the domain T.
Definition: RangeSet.h:69
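A sketch of interval bookkeeping with RangeSet; the range() helper is assumed from the same header and contains() from boost::icl:
    // Sketch: track which ledger sequences have been stored.
    RangeSet<std::uint32_t> stored;
    stored.insert(range(1u, 10u));  // closed interval [1, 10]
    bool const have5 = boost::icl::contains(stored, 5u);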
std::max
T max(T... args)
ripple::NodeStore::DatabaseShardImp::shards_
std::unordered_map< std::uint32_t, std::shared_ptr< Shard > > shards_
Definition: DatabaseShardImp.h:185
ripple::NodeStore::DatabaseShardImp::DatabaseShardImp
DatabaseShardImp()=delete
std::unique_ptr
STL class.
std::unordered_map
STL class.
ripple::NodeStore::DatabaseShardImp::getBackend
Backend & getBackend() override
Definition: DatabaseShardImp.cpp:544
ripple::PublisherStatus::available
@ available
ripple::NodeStore::DatabaseShardImp::prepareForNewShard
boost::optional< PathDesignation > prepareForNewShard(std::uint32_t shardIndex, std::uint32_t numHistoricalShards, std::lock_guard< std::mutex > const &lock)
Definition: DatabaseShardImp.cpp:1752
ripple::ConfigSection::nodeDatabase
static std::string nodeDatabase()
Definition: ConfigSections.h:33
std::exception::what
T what(T... args)
ripple::NodeStore::DatabaseShardImp::onStop
void onStop() override
Override called when the stop notification is issued.
Definition: DatabaseShardImp.cpp:699
ripple::NodeStore::DatabaseShardImp::ctx_
std::unique_ptr< nudb::context > ctx_
Definition: DatabaseShardImp.h:179
ripple::BasicConfig::section
Section & section(std::string const &name)
Returns the section with the given name.
Definition: BasicConfig.cpp:138
ripple::Stoppable::isStopping
bool isStopping() const
Returns true if the stoppable should stop.
Definition: Stoppable.cpp:54
ripple::NodeStore::DatabaseShardImp::backendName_
std::string backendName_
Definition: DatabaseShardImp.h:203
ripple::NodeStore::Backend
A backend used for the NodeStore.
Definition: Backend.h:39