DatabaseShardImp.cpp
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2012, 2017 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <ripple/app/ledger/InboundLedgers.h>
#include <ripple/app/ledger/LedgerMaster.h>
#include <ripple/app/misc/NetworkOPs.h>
#include <ripple/basics/ByteUtilities.h>
#include <ripple/basics/chrono.h>
#include <ripple/basics/random.h>
#include <ripple/core/ConfigSections.h>
#include <ripple/nodestore/DummyScheduler.h>
#include <ripple/nodestore/impl/DatabaseShardImp.h>
#include <ripple/overlay/Overlay.h>
#include <ripple/overlay/predicates.h>
#include <ripple/protocol/HashPrefix.h>

#include <boost/algorithm/string/predicate.hpp>

#if BOOST_OS_LINUX
#include <sys/statvfs.h>
#endif

namespace ripple {
namespace NodeStore {

DatabaseShardImp::DatabaseShardImp(
    Application& app,
    Stoppable& parent,
    std::string const& name,
    Scheduler& scheduler,
    int readThreads,
    beast::Journal j)
    : DatabaseShard(
          name,
          parent,
          scheduler,
          readThreads,
          app.config().section(ConfigSection::shardDatabase()),
          j)
    , app_(app)
    , parent_(parent)
    , taskQueue_(std::make_unique<TaskQueue>(*this))
    , earliestShardIndex_(seqToShardIndex(earliestLedgerSeq()))
    , avgShardFileSz_(ledgersPerShard_ * kilobytes(192ull))
    , openFinalLimit_(
          app.config().getValueFor(SizedItem::openFinalLimit, boost::none))
{
}

bool
DatabaseShardImp::init()
{
    {
        std::lock_guard lock(mutex_);
        if (init_)
        {
            JLOG(j_.error()) << "already initialized";
            return false;
        }

        if (!initConfig(lock))
        {
            JLOG(j_.error()) << "invalid configuration file settings";
            return false;
        }

        try
        {
            using namespace boost::filesystem;

            // Consolidate the main storage path and all historical paths
            std::vector<path> paths{dir_};
            paths.insert(
                paths.end(), historicalPaths_.begin(), historicalPaths_.end());

            for (auto const& path : paths)
            {
                if (exists(path))
                {
                    if (!is_directory(path))
                    {
                        JLOG(j_.error()) << path << " must be a directory";
                        return false;
                    }
                }
                else if (!create_directories(path))
                {
                    JLOG(j_.error())
                        << "failed to create path: " + path.string();
                    return false;
                }
            }

            if (!historicalPaths_.empty())
            {
                // Check historical paths for duplicated file systems
                if (!checkHistoricalPaths())
                    return false;
            }

            ctx_ = std::make_unique<nudb::context>();
            ctx_->start();

            // Find shards
            std::uint32_t openFinals{0};
            for (auto const& path : paths)
            {
                for (auto const& it : directory_iterator(path))
                {
                    // Ignore files
                    if (!is_directory(it))
                        continue;

                    // Ignore nonnumerical directory names
                    auto const shardDir{it.path()};
                    auto dirName{shardDir.stem().string()};
                    if (!std::all_of(
                            dirName.begin(), dirName.end(), [](auto c) {
                                return ::isdigit(static_cast<unsigned char>(c));
                            }))
                    {
                        continue;
                    }

                    // Ignore values below the earliest shard index
                    auto const shardIndex{std::stoul(dirName)};
                    if (shardIndex < earliestShardIndex())
                    {
                        JLOG(j_.debug())
                            << "shard " << shardIndex
                            << " ignored, comes before earliest shard index "
                            << earliestShardIndex();
                        continue;
                    }

                    // Check if a previous import failed
                    if (is_regular_file(shardDir / importMarker_))
                    {
                        JLOG(j_.warn())
                            << "shard " << shardIndex
                            << " previously failed import, removing";
                        remove_all(shardDir);
                        continue;
                    }

                    auto shard{std::make_shared<Shard>(
                        app_, *this, shardIndex, shardDir.parent_path(), j_)};
                    if (!shard->init(scheduler_, *ctx_))
                    {
                        // Remove corrupted or legacy shard
                        shard->removeOnDestroy();
                        JLOG(j_.warn())
                            << "shard " << shardIndex << " removed, "
                            << (shard->isLegacy() ? "legacy" : "corrupted")
                            << " shard";
                        continue;
                    }

                    switch (shard->getState())
                    {
                        case Shard::final:
                            if (++openFinals > openFinalLimit_)
                                shard->tryClose();
                            shards_.emplace(shardIndex, std::move(shard));
                            break;

                        case Shard::complete:
                            finalizeShard(
                                shards_.emplace(shardIndex, std::move(shard))
                                    .first->second,
                                true,
                                boost::none);
                            break;

                        case Shard::acquire:
                            if (acquireIndex_ != 0)
                            {
                                JLOG(j_.error())
                                    << "more than one shard being acquired";
                                return false;
                            }

                            shards_.emplace(shardIndex, std::move(shard));
                            acquireIndex_ = shardIndex;
                            break;

                        default:
                            JLOG(j_.error())
                                << "shard " << shardIndex << " invalid state";
                            return false;
                    }
                }
            }
        }
        catch (std::exception const& e)
        {
            JLOG(j_.fatal()) << "Exception caught in function " << __func__
                             << ". Error: " << e.what();
            return false;
        }

        updateStatus(lock);
        setParent(parent_);
        init_ = true;
    }

    setFileStats();
    return true;
}

boost::optional<std::uint32_t>
DatabaseShardImp::prepareLedger(std::uint32_t validLedgerSeq)
{
    boost::optional<std::uint32_t> shardIndex;

    {
        std::lock_guard lock(mutex_);
        assert(init_);

        if (acquireIndex_ != 0)
        {
            if (auto const it{shards_.find(acquireIndex_)}; it != shards_.end())
                return it->second->prepare();

            // Should never get here
            assert(false);
            return boost::none;
        }

        if (!canAdd_)
            return boost::none;

        shardIndex = findAcquireIndex(validLedgerSeq, lock);
    }

    if (!shardIndex)
    {
        JLOG(j_.debug()) << "no new shards to add";
        {
            std::lock_guard lock(mutex_);
            canAdd_ = false;
        }
        return boost::none;
    }

    auto const pathDesignation = [this, shardIndex = *shardIndex]() {
        std::lock_guard lock(mutex_);
        return prepareForNewShard(shardIndex, numHistoricalShards(lock), lock);
    }();

    if (!pathDesignation)
        return boost::none;

    auto const needsHistoricalPath =
        *pathDesignation == PathDesignation::historical;

    auto shard = [this, shardIndex, needsHistoricalPath] {
        std::lock_guard lock(mutex_);
        return std::make_unique<Shard>(
            app_,
            *this,
            *shardIndex,
            (needsHistoricalPath ? chooseHistoricalPath(lock) : ""),
            j_);
    }();

    if (!shard->init(scheduler_, *ctx_))
        return boost::none;

    auto const ledgerSeq{shard->prepare()};
    {
        std::lock_guard lock(mutex_);
        shards_.emplace(*shardIndex, std::move(shard));
        acquireIndex_ = *shardIndex;
    }
    return ledgerSeq;
}

bool
DatabaseShardImp::prepareShards(std::vector<std::uint32_t> const& shardIndexes)
{
    auto fail = [j = j_, &shardIndexes](
                    std::string const& msg,
                    boost::optional<std::uint32_t> shardIndex = boost::none) {
        auto multipleIndexPrequel = [&shardIndexes] {
            std::vector<std::string> indexesAsString(shardIndexes.size());
            std::transform(
                shardIndexes.begin(),
                shardIndexes.end(),
                indexesAsString.begin(),
                [](uint32_t const index) { return std::to_string(index); });

            return std::string("shard") +
                (shardIndexes.size() > 1 ? "s " : " ") +
                boost::algorithm::join(indexesAsString, ", ");
        };

        std::string const prequel = shardIndex
            ? "shard " + std::to_string(*shardIndex)
            : multipleIndexPrequel();

        JLOG(j.error()) << prequel << " " << msg;
        return false;
    };

    std::lock_guard lock(mutex_);
    assert(init_);

    if (!canAdd_)
        return fail("cannot be stored at this time");

    auto historicalShardsToPrepare = 0;

    for (auto const shardIndex : shardIndexes)
    {
        if (shardIndex < earliestShardIndex())
        {
            return fail(
                "comes before earliest shard index " +
                    std::to_string(earliestShardIndex()),
                shardIndex);
        }

        // If we are synced to the network, check if the shard index is
        // greater or equal to the current or validated shard index.
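        // (A shard at or past that point would still be filling with new
        // ledgers, so it cannot be prepared for import yet.)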
        auto seqCheck = [&](std::uint32_t ledgerSeq) {
            if (ledgerSeq >= earliestLedgerSeq() &&
                shardIndex >= seqToShardIndex(ledgerSeq))
            {
                return fail("invalid index", shardIndex);
            }
            return true;
        };
        if (!seqCheck(app_.getLedgerMaster().getValidLedgerIndex() + 1) ||
            !seqCheck(app_.getLedgerMaster().getCurrentLedgerIndex()))
        {
            return fail("invalid index", shardIndex);
        }

        if (shards_.find(shardIndex) != shards_.end())
            return fail("is already stored", shardIndex);

        if (preparedIndexes_.find(shardIndex) != preparedIndexes_.end())
            return fail("is already queued for import", shardIndex);

        // Any shard earlier than the two most recent shards
        // is a historical shard
        if (shardIndex < shardBoundaryIndex())
            ++historicalShardsToPrepare;
    }

    auto const numHistShards = numHistoricalShards(lock);

    // Check shard count and available storage space
    if (numHistShards + historicalShardsToPrepare > maxHistoricalShards_)
        return fail("maximum number of historical shards reached");

    if (historicalShardsToPrepare)
    {
        // Check available storage space for historical shards
        if (!sufficientStorage(
                historicalShardsToPrepare, PathDesignation::historical, lock))
            return fail("insufficient storage space available");
    }

    if (auto const recentShardsToPrepare =
            shardIndexes.size() - historicalShardsToPrepare;
        recentShardsToPrepare)
    {
        // Check available storage space for recent shards
        if (!sufficientStorage(
                recentShardsToPrepare, PathDesignation::none, lock))
            return fail("insufficient storage space available");
    }

    for (auto const shardIndex : shardIndexes)
    {
        auto const prepareSuccessful =
            preparedIndexes_.emplace(shardIndex).second;

        (void)prepareSuccessful;
        assert(prepareSuccessful);
    }

    return true;
}

void
DatabaseShardImp::removePreShard(std::uint32_t shardIndex)
{
    std::lock_guard lock(mutex_);
    assert(init_);

    preparedIndexes_.erase(shardIndex);
}

std::string
DatabaseShardImp::getPreShards()
{
    RangeSet<std::uint32_t> rs;
    {
        std::lock_guard lock(mutex_);
        assert(init_);

        for (auto const& shardIndex : preparedIndexes_)
            rs.insert(shardIndex);
    }

    if (rs.empty())
        return {};

    return to_string(rs);
}

bool
DatabaseShardImp::importShard(
    std::uint32_t shardIndex,
    boost::filesystem::path const& srcDir)
{
    auto fail = [&](std::string const& msg,
                    std::lock_guard<std::mutex> const& lock) {
        JLOG(j_.error()) << "shard " << shardIndex << " " << msg;

        // Remove the failed import shard index so it can be retried
        preparedIndexes_.erase(shardIndex);
        return false;
    };

    using namespace boost::filesystem;
    try
    {
        if (!is_directory(srcDir) || is_empty(srcDir))
        {
            return fail(
                "invalid source directory " + srcDir.string(),
                std::lock_guard(mutex_));
        }
    }
    catch (std::exception const& e)
    {
        return fail(
            std::string(". Exception caught in function ") + __func__ +
                ". Error: " + e.what(),
            std::lock_guard(mutex_));
    }

    auto const expectedHash{app_.getLedgerMaster().walkHashBySeq(
        lastLedgerSeq(shardIndex), InboundLedger::Reason::GENERIC)};
    if (!expectedHash)
        return fail("expected hash not found", std::lock_guard(mutex_));

    path dstDir;
    {
        std::lock_guard lock(mutex_);
        if (shards_.find(shardIndex) != shards_.end())
            return fail("already exists", lock);

        // Check shard was prepared for import
        if (preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
            return fail("was not prepared for import", lock);

        auto const pathDesignation{
            prepareForNewShard(shardIndex, numHistoricalShards(lock), lock)};
        if (!pathDesignation)
            return fail("failed to import", lock);

        if (*pathDesignation == PathDesignation::historical)
            dstDir = chooseHistoricalPath(lock);
        else
            dstDir = dir_;
    }
    dstDir /= std::to_string(shardIndex);

    auto renameDir = [&](path const& src, path const& dst) {
        try
        {
            rename(src, dst);
        }
        catch (std::exception const& e)
        {
            return fail(
                std::string(". Exception caught in function ") + __func__ +
                    ". Error: " + e.what(),
                std::lock_guard(mutex_));
        }
        return true;
    };

    // Rename source directory to the shard database directory
    if (!renameDir(srcDir, dstDir))
        return false;

    // Create the new shard
    auto shard{std::make_unique<Shard>(
        app_, *this, shardIndex, dstDir.parent_path(), j_)};

    if (!shard->init(scheduler_, *ctx_) || shard->getState() != Shard::complete)
    {
        shard.reset();
        renameDir(dstDir, srcDir);
        return fail("failed to import", std::lock_guard(mutex_));
    }

    auto const [it, inserted] = [&]() {
        std::lock_guard lock(mutex_);
        preparedIndexes_.erase(shardIndex);
        return shards_.emplace(shardIndex, std::move(shard));
    }();

    if (!inserted)
    {
        shard.reset();
        renameDir(dstDir, srcDir);
        return fail("failed to import", std::lock_guard(mutex_));
    }

    finalizeShard(it->second, true, expectedHash);
    return true;
}

std::shared_ptr<Ledger>
DatabaseShardImp::fetchLedger(uint256 const& hash, std::uint32_t ledgerSeq)
{
    auto const shardIndex{seqToShardIndex(ledgerSeq)};
    {
        std::shared_ptr<Shard> shard;
        {
            std::lock_guard lock(mutex_);
            assert(init_);

            auto const it{shards_.find(shardIndex)};
            if (it == shards_.end())
                return nullptr;
            shard = it->second;
        }

        // Ledger must be stored in a final or acquiring shard
        switch (shard->getState())
        {
            case Shard::final:
                break;
            case Shard::acquire:
                if (shard->containsLedger(ledgerSeq))
                    break;
                [[fallthrough]];
            default:
                return nullptr;
        }
    }

    auto const nodeObject{Database::fetchNodeObject(hash, ledgerSeq)};
    if (!nodeObject)
        return nullptr;

    auto fail = [&](std::string const& msg) -> std::shared_ptr<Ledger> {
        JLOG(j_.error()) << "shard " << shardIndex << " " << msg;
        return nullptr;
    };

    auto ledger{std::make_shared<Ledger>(
        deserializePrefixedHeader(makeSlice(nodeObject->getData())),
        app_.config(),
        *app_.getShardFamily())};

    if (ledger->info().seq != ledgerSeq)
    {
        return fail(
            "encountered invalid ledger sequence " + std::to_string(ledgerSeq));
    }
    if (ledger->info().hash != hash)
    {
        return fail(
            "encountered invalid ledger hash " + to_string(hash) +
            " on sequence " + std::to_string(ledgerSeq));
    }

    ledger->setFull();
    if (!ledger->stateMap().fetchRoot(
            SHAMapHash{ledger->info().accountHash}, nullptr))
    {
        return fail(
            "is missing root STATE node on hash " + to_string(hash) +
            " on sequence " + std::to_string(ledgerSeq));
    }

    if (ledger->info().txHash.isNonZero())
    {
        if (!ledger->txMap().fetchRoot(
                SHAMapHash{ledger->info().txHash}, nullptr))
        {
            return fail(
                "is missing root TXN node on hash " + to_string(hash) +
                " on sequence " + std::to_string(ledgerSeq));
        }
    }
    return ledger;
}

void
DatabaseShardImp::setStored(std::shared_ptr<Ledger const> const& ledger)
{
    auto const ledgerSeq{ledger->info().seq};
    if (ledger->info().hash.isZero())
    {
        JLOG(j_.error()) << "zero ledger hash for ledger sequence "
                         << ledgerSeq;
        return;
    }
    if (ledger->info().accountHash.isZero())
    {
        JLOG(j_.error()) << "zero account hash for ledger sequence "
                         << ledgerSeq;
        return;
    }
    if (ledger->stateMap().getHash().isNonZero() &&
        !ledger->stateMap().isValid())
    {
        JLOG(j_.error()) << "invalid state map for ledger sequence "
                         << ledgerSeq;
        return;
    }
    if (ledger->info().txHash.isNonZero() && !ledger->txMap().isValid())
    {
        JLOG(j_.error()) << "invalid transaction map for ledger sequence "
                         << ledgerSeq;
        return;
    }

    auto const shardIndex{seqToShardIndex(ledgerSeq)};
    std::shared_ptr<Shard> shard;
    {
        std::lock_guard lock(mutex_);
        assert(init_);

        if (shardIndex != acquireIndex_)
        {
            JLOG(j_.trace())
                << "shard " << shardIndex << " is not being acquired";
            return;
        }

        auto const it{shards_.find(shardIndex)};
        if (it == shards_.end())
        {
            JLOG(j_.error())
                << "shard " << shardIndex << " is not being acquired";
            return;
        }
        shard = it->second;
    }

    if (shard->containsLedger(ledgerSeq))
    {
        JLOG(j_.trace()) << "shard " << shardIndex << " ledger already stored";
        return;
    }

    setStoredInShard(shard, ledger);
}

std::string
DatabaseShardImp::getCompleteShards()
{
    std::lock_guard lock(mutex_);
    assert(init_);

    return status_;
}

void
DatabaseShardImp::onStop()
{
    // Stop read threads in base before data members are destroyed
    stopReadThreads();

    std::lock_guard lock(mutex_);

    // Notify shards to stop
    for (auto const& e : shards_)
        e.second->stop();
}

void
DatabaseShardImp::onChildrenStopped()
{
    std::vector<std::weak_ptr<Shard>> shards;
    {
        std::lock_guard lock(mutex_);

        shards.reserve(shards_.size());
        for (auto const& e : shards_)
            shards.push_back(e.second);
        shards_.clear();
    }

    // All shards should be expired at this point
    for (auto const& e : shards)
    {
        if (!e.expired())
        {
            std::string shardIndex;
            if (auto const shard{e.lock()}; shard)
                shardIndex = std::to_string(shard->index());

            JLOG(j_.warn()) << " shard " << shardIndex << " unexpired";
        }
    }

    if (areChildrenStopped())
        stopped();
    else
    {
        JLOG(j_.warn()) << " Children failed to stop";
    }
}

void
DatabaseShardImp::import(Database& source)
{
    {
        std::lock_guard lock(mutex_);
        assert(init_);

        // Only the application local node store can be imported
        if (&source != &app_.getNodeStore())
        {
            assert(false);
            JLOG(j_.error()) << "invalid source database";
            return;
        }

        std::uint32_t earliestIndex;
        std::uint32_t latestIndex;
        {
            auto loadLedger = [&](bool ascendSort =
                                      true) -> boost::optional<std::uint32_t> {
                std::shared_ptr<Ledger> ledger;
                std::uint32_t ledgerSeq;
                std::tie(ledger, ledgerSeq, std::ignore) = loadLedgerHelper(
                    "WHERE LedgerSeq >= " +
                        std::to_string(earliestLedgerSeq()) +
                        " order by LedgerSeq " + (ascendSort ? "asc" : "desc") +
                        " limit 1",
                    app_,
                    false);
                if (!ledger || ledgerSeq == 0)
                {
                    JLOG(j_.error()) << "no suitable ledgers were found in"
                                        " the SQLite database to import";
                    return boost::none;
                }
                return ledgerSeq;
            };

            // Find earliest ledger sequence stored
            auto ledgerSeq{loadLedger()};
            if (!ledgerSeq)
                return;
            earliestIndex = seqToShardIndex(*ledgerSeq);

            // Consider only complete shards
            if (ledgerSeq != firstLedgerSeq(earliestIndex))
                ++earliestIndex;

            // Find last ledger sequence stored
            ledgerSeq = loadLedger(false);
            if (!ledgerSeq)
                return;
            latestIndex = seqToShardIndex(*ledgerSeq);

            // Consider only complete shards
            if (ledgerSeq != lastLedgerSeq(latestIndex))
                --latestIndex;

            if (latestIndex < earliestIndex)
            {
                JLOG(j_.error()) << "no suitable ledgers were found in"
                                    " the SQLite database to import";
                return;
            }
        }

        auto numHistShards = this->numHistoricalShards(lock);

        // Import the shards
        for (std::uint32_t shardIndex = earliestIndex;
             shardIndex <= latestIndex;
             ++shardIndex)
        {
            auto const pathDesignation =
                prepareForNewShard(shardIndex, numHistShards, lock);

            if (!pathDesignation)
                break;

            auto const needsHistoricalPath =
                *pathDesignation == PathDesignation::historical;

            // Skip if being acquired
            if (shardIndex == acquireIndex_)
            {
                JLOG(j_.debug())
                    << "shard " << shardIndex << " already being acquired";
                continue;
            }

            // Skip if being imported
            if (preparedIndexes_.find(shardIndex) != preparedIndexes_.end())
            {
                JLOG(j_.debug())
                    << "shard " << shardIndex << " already being imported";
                continue;
            }

            // Skip if stored
            if (shards_.find(shardIndex) != shards_.end())
            {
                JLOG(j_.debug()) << "shard " << shardIndex << " already stored";
                continue;
            }

            // Verify SQLite ledgers are in the node store
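            // (spot-check one ledger hash out of every 256 rather than
            // fetching every ledger in the shard)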
            {
                auto const firstSeq{firstLedgerSeq(shardIndex)};
                auto const lastSeq{
                    std::max(firstSeq, lastLedgerSeq(shardIndex))};
                auto const numLedgers{
                    shardIndex == earliestShardIndex() ? lastSeq - firstSeq + 1
                                                       : ledgersPerShard_};
                auto ledgerHashes{getHashesByIndex(firstSeq, lastSeq, app_)};
                if (ledgerHashes.size() != numLedgers)
                    continue;

                bool valid{true};
                for (std::uint32_t n = firstSeq; n <= lastSeq; n += 256)
                {
                    if (!source.fetchNodeObject(ledgerHashes[n].first, n))
                    {
                        JLOG(j_.warn()) << "SQLite ledger sequence " << n
                                        << " mismatches node store";
                        valid = false;
                        break;
                    }
                }
                if (!valid)
                    continue;
            }

            auto const path =
                needsHistoricalPath ? chooseHistoricalPath(lock) : dir_;

            // Create the new shard
            auto shard{
                std::make_unique<Shard>(app_, *this, shardIndex, path, j_)};
            if (!shard->init(scheduler_, *ctx_))
                continue;

            // Create a marker file to signify an import in progress
            auto const shardDir{path / std::to_string(shardIndex)};
            auto const markerFile{shardDir / importMarker_};
            {
                std::ofstream ofs{markerFile.string()};
                if (!ofs.is_open())
                {
                    JLOG(j_.error()) << "shard " << shardIndex
                                     << " failed to create temp marker file";
                    shard->removeOnDestroy();
                    continue;
                }
                ofs.close();
            }

            // Copy the ledgers from node store
            std::shared_ptr<Ledger> recentStored;
            boost::optional<uint256> lastLedgerHash;

            while (auto const ledgerSeq = shard->prepare())
            {
                auto ledger{loadByIndex(*ledgerSeq, app_, false)};
                if (!ledger || ledger->info().seq != ledgerSeq)
                    break;

                auto const result{shard->storeLedger(ledger, recentStored)};
                storeStats(result.count, result.size);
                if (result.error)
                    break;

                if (!shard->setLedgerStored(ledger))
                    break;

                if (!lastLedgerHash && ledgerSeq == lastLedgerSeq(shardIndex))
                    lastLedgerHash = ledger->info().hash;

                recentStored = std::move(ledger);
            }

            using namespace boost::filesystem;
            bool success{false};
            if (lastLedgerHash && shard->getState() == Shard::complete)
            {
                // Store shard final key
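                // (the final key records the shard version, the first and
                // last ledger sequences, and the hash of the last ledger)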
                Serializer s;
                s.add32(Shard::version);
                s.add32(firstLedgerSeq(shardIndex));
                s.add32(lastLedgerSeq(shardIndex));
                s.addBitString(*lastLedgerHash);
                auto const nodeObject{NodeObject::createObject(
                    hotUNKNOWN, std::move(s.modData()), Shard::finalKey)};

                if (shard->storeNodeObject(nodeObject))
                {
                    try
                    {
                        // The import process is complete and the
                        // marker file is no longer required
                        remove_all(markerFile);

                        JLOG(j_.debug()) << "shard " << shardIndex
                                         << " was successfully imported";
                        finalizeShard(
                            shards_.emplace(shardIndex, std::move(shard))
                                .first->second,
                            true,
                            boost::none);
                        success = true;

                        if (shardIndex < shardBoundaryIndex())
                            ++numHistShards;
                    }
                    catch (std::exception const& e)
                    {
                        JLOG(j_.fatal()) << "shard index " << shardIndex
                                         << ". Exception caught in function "
                                         << __func__ << ". Error: " << e.what();
                    }
                }
            }

            if (!success)
            {
                JLOG(j_.error())
                    << "shard " << shardIndex << " failed to import";
                shard->removeOnDestroy();
            }
        }

        updateStatus(lock);
    }

    setFileStats();
}

std::int32_t
DatabaseShardImp::getWriteLoad() const
{
    std::shared_ptr<Shard> shard;
    {
        std::lock_guard lock(mutex_);
        assert(init_);

        auto const it{shards_.find(acquireIndex_)};
        if (it == shards_.end())
            return 0;
        shard = it->second;
    }

    return shard->getWriteLoad();
}

void
DatabaseShardImp::store(
    NodeObjectType type,
    Blob&& data,
    uint256 const& hash,
    std::uint32_t ledgerSeq)
{
    auto const shardIndex{seqToShardIndex(ledgerSeq)};
    std::shared_ptr<Shard> shard;
    {
        std::lock_guard lock(mutex_);
        if (shardIndex != acquireIndex_)
        {
            JLOG(j_.trace())
                << "shard " << shardIndex << " is not being acquired";
            return;
        }

        auto const it{shards_.find(shardIndex)};
        if (it == shards_.end())
        {
            JLOG(j_.error())
                << "shard " << shardIndex << " is not being acquired";
            return;
        }
        shard = it->second;
    }

    auto const nodeObject{
        NodeObject::createObject(type, std::move(data), hash)};
    if (shard->storeNodeObject(nodeObject))
        storeStats(1, nodeObject->getData().size());
}

bool
DatabaseShardImp::storeLedger(std::shared_ptr<Ledger const> const& srcLedger)
{
    auto const ledgerSeq{srcLedger->info().seq};
    auto const shardIndex{seqToShardIndex(ledgerSeq)};
    std::shared_ptr<Shard> shard;
    {
        std::lock_guard lock(mutex_);
        assert(init_);

        if (shardIndex != acquireIndex_)
        {
            JLOG(j_.trace())
                << "shard " << shardIndex << " is not being acquired";
            return false;
        }

        auto const it{shards_.find(shardIndex)};
        if (it == shards_.end())
        {
            JLOG(j_.error())
                << "shard " << shardIndex << " is not being acquired";
            return false;
        }
        shard = it->second;
    }

    auto const result{shard->storeLedger(srcLedger, nullptr)};
    storeStats(result.count, result.size);
    if (result.error || result.count == 0 || result.size == 0)
        return false;

    return setStoredInShard(shard, srcLedger);
}

void
DatabaseShardImp::sweep()
{
    std::vector<std::weak_ptr<Shard>> shards;
    {
        std::lock_guard lock(mutex_);
        assert(init_);

        shards.reserve(shards_.size());
        for (auto const& e : shards_)
            shards.push_back(e.second);
    }

    std::vector<std::shared_ptr<Shard>> openFinals;
    openFinals.reserve(openFinalLimit_);

    for (auto const& e : shards)
    {
        if (auto const shard{e.lock()}; shard && shard->isOpen())
        {
            shard->sweep();

            if (shard->getState() == Shard::final)
                openFinals.emplace_back(std::move(shard));
        }
    }

    if (openFinals.size() > openFinalLimit_)
    {
        JLOG(j_.trace()) << "Open shards exceed configured limit of "
                         << openFinalLimit_ << " by "
                         << (openFinals.size() - openFinalLimit_);

        // Try to close enough shards to be within the limit.
        // Sort ascending on last use so the oldest are removed first.
        std::sort(
            openFinals.begin(),
            openFinals.end(),
            [&](std::shared_ptr<Shard> const& lhsShard,
                std::shared_ptr<Shard> const& rhsShard) {
                return lhsShard->getLastUse() < rhsShard->getLastUse();
            });

        for (auto it{openFinals.cbegin()};
             it != openFinals.cend() && openFinals.size() > openFinalLimit_;)
        {
            if ((*it)->tryClose())
                it = openFinals.erase(it);
            else
                ++it;
        }
    }
}

bool
DatabaseShardImp::initConfig(std::lock_guard<std::mutex> const&)
{
    auto fail = [j = j_](std::string const& msg) {
        JLOG(j.error()) << "[" << ConfigSection::shardDatabase() << "] " << msg;
        return false;
    };

    Config const& config{app_.config()};
    Section const& section{config.section(ConfigSection::shardDatabase())};

    {
        // The earliest ledger sequence defaults to XRP_LEDGER_EARLIEST_SEQ.
        // A custom earliest ledger sequence can be set through the
        // configuration file using the 'earliest_seq' field under the
        // 'node_db' and 'shard_db' stanzas. If specified, this field must
        // have a value greater than zero and be equally assigned in
        // both stanzas.

        std::uint32_t shardDBEarliestSeq{0};
        get_if_exists<std::uint32_t>(
            section, "earliest_seq", shardDBEarliestSeq);

        std::uint32_t nodeDBEarliestSeq{0};
        get_if_exists<std::uint32_t>(
            config.section(ConfigSection::nodeDatabase()),
            "earliest_seq",
            nodeDBEarliestSeq);

        if (shardDBEarliestSeq != nodeDBEarliestSeq)
        {
            return fail(
                "and [" + ConfigSection::nodeDatabase() +
                "] define different 'earliest_seq' values");
        }
    }

    using namespace boost::filesystem;
    if (!get_if_exists<path>(section, "path", dir_))
        return fail("'path' missing");

    {
        get_if_exists(section, "max_historical_shards", maxHistoricalShards_);

        Section const& historicalShardPaths =
            config.section(SECTION_HISTORICAL_SHARD_PATHS);

        auto values = historicalShardPaths.values();

        std::sort(values.begin(), values.end());
        values.erase(std::unique(values.begin(), values.end()), values.end());

        for (auto const& s : values)
        {
            auto const dir = path(s);
            if (dir_ == dir)
            {
                return fail(
                    "the 'path' cannot also be in the "
                    "'historical_shard_path' section");
            }

            historicalPaths_.push_back(s);
        }
    }

    if (section.exists("ledgers_per_shard"))
    {
        // To be set only in standalone for testing
        if (!config.standalone())
            return fail("'ledgers_per_shard' only honored in stand alone");

        ledgersPerShard_ = get<std::uint32_t>(section, "ledgers_per_shard");
        if (ledgersPerShard_ == 0 || ledgersPerShard_ % 256 != 0)
            return fail("'ledgers_per_shard' must be a multiple of 256");

        earliestShardIndex_ = seqToShardIndex(earliestLedgerSeq());
        avgShardFileSz_ = ledgersPerShard_ * kilobytes(192);
    }

    // NuDB is the default and only supported permanent storage backend
    backendName_ = get<std::string>(section, "type", "nudb");
    if (!boost::iequals(backendName_, "NuDB"))
        return fail("'type' value unsupported");

    return true;
}

std::shared_ptr<NodeObject>
DatabaseShardImp::fetchNodeObject(
    uint256 const& hash,
    std::uint32_t ledgerSeq,
    FetchReport& fetchReport)
{
    auto const shardIndex{seqToShardIndex(ledgerSeq)};
    std::shared_ptr<Shard> shard;
    {
        std::lock_guard lock(mutex_);
        auto const it{shards_.find(shardIndex)};
        if (it == shards_.end())
            return nullptr;
        shard = it->second;
    }

    return shard->fetchNodeObject(hash, fetchReport);
}

boost::optional<std::uint32_t>
DatabaseShardImp::findAcquireIndex(
    std::uint32_t validLedgerSeq,
    std::lock_guard<std::mutex> const&)
{
    if (validLedgerSeq < earliestLedgerSeq())
        return boost::none;

    auto const maxShardIndex{[this, validLedgerSeq]() {
        auto shardIndex{seqToShardIndex(validLedgerSeq)};
        if (validLedgerSeq != lastLedgerSeq(shardIndex))
            --shardIndex;
        return shardIndex;
    }()};
    auto const maxNumShards{maxShardIndex - earliestShardIndex() + 1};

    // Check if the shard store has all shards
    if (shards_.size() >= maxNumShards)
        return boost::none;

    if (maxShardIndex < 1024 ||
        static_cast<float>(shards_.size()) / maxNumShards > 0.5f)
    {
        // Small or mostly full index space to sample.
        // Find the available indexes and select one at random.
        std::vector<std::uint32_t> available;
        available.reserve(maxNumShards - shards_.size());

        for (auto shardIndex = earliestShardIndex();
             shardIndex <= maxShardIndex;
             ++shardIndex)
        {
            if (shards_.find(shardIndex) == shards_.end() &&
                preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
            {
                available.push_back(shardIndex);
            }
        }

        if (available.empty())
            return boost::none;

        if (available.size() == 1)
            return available.front();

        return available[rand_int(
            0u, static_cast<std::uint32_t>(available.size() - 1))];
    }

    // Large, sparse index space to sample.
    // Keep choosing indexes at random until an available one is found;
    // the chance of needing more than 30 attempts is less than 1 in a billion.
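    // (With at most half the indexes taken, each draw succeeds with
    // probability of at least 0.5, and 0.5^30 is roughly 9e-10.)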
    for (int i = 0; i < 40; ++i)
    {
        auto const shardIndex{rand_int(earliestShardIndex(), maxShardIndex)};
        if (shards_.find(shardIndex) == shards_.end() &&
            preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
        {
            return shardIndex;
        }
    }

    assert(false);
    return boost::none;
}

void
DatabaseShardImp::finalizeShard(
    std::shared_ptr<Shard>& shard,
    bool writeSQLite,
    boost::optional<uint256> const& expectedHash)
{
    taskQueue_->addTask([this,
                         wptr = std::weak_ptr<Shard>(shard),
                         writeSQLite,
                         expectedHash]() {
        if (isStopping())
            return;

        auto shard{wptr.lock()};
        if (!shard)
        {
            JLOG(j_.debug()) << "Shard removed before being finalized";
            return;
        }

        if (!shard->finalize(writeSQLite, expectedHash))
        {
            if (isStopping())
                return;

            // Invalid or corrupt shard, remove it
            removeFailedShard(shard);
            return;
        }

        if (isStopping())
            return;

        {
            auto const boundaryIndex{shardBoundaryIndex()};

            std::lock_guard lock(mutex_);
            updateStatus(lock);

            if (shard->index() < boundaryIndex)
            {
                // This is a historical shard
                if (!historicalPaths_.empty() &&
                    shard->getDir().parent_path() == dir_)
                {
                    // Shard wasn't placed at a separate historical path
                    JLOG(j_.warn()) << "shard " << shard->index()
                                    << " is not stored at a historical path";
                }
            }

            else
            {
                // Not a historical shard. Shift recent shards if necessary
                relocateOutdatedShards(lock);
                assert(!boundaryIndex || shard->index() - boundaryIndex <= 1);

                auto& recentShard = shard->index() == boundaryIndex
                    ? secondLatestShardIndex_
                    : latestShardIndex_;

                // Set the appropriate recent shard index
                recentShard = shard->index();

                if (shard->getDir().parent_path() != dir_)
                {
                    JLOG(j_.warn()) << "shard " << shard->index()
                                    << " is not stored at the path";
                }
            }
        }

        setFileStats();

        // Update peers with new shard index
        if (!app_.config().standalone() &&
            app_.getOPs().getOperatingMode() != OperatingMode::DISCONNECTED)
        {
            protocol::TMPeerShardInfo message;
            PublicKey const& publicKey{app_.nodeIdentity().first};
            message.set_nodepubkey(publicKey.data(), publicKey.size());
            message.set_shardindexes(std::to_string(shard->index()));
            app_.overlay().foreach(send_always(std::make_shared<Message>(
                message, protocol::mtPEER_SHARD_INFO)));
        }
    });
}

void
DatabaseShardImp::setFileStats()
{
    std::vector<std::weak_ptr<Shard>> shards;
    {
        std::lock_guard lock(mutex_);
        if (shards_.empty())
            return;

        shards.reserve(shards_.size());
        for (auto const& e : shards_)
            shards.push_back(e.second);
    }

    std::uint64_t sumSz{0};
    std::uint32_t sumFd{0};
    std::uint32_t numShards{0};
    for (auto const& e : shards)
    {
        if (auto const shard{e.lock()}; shard)
        {
            auto const [sz, fd] = shard->getFileInfo();
            sumSz += sz;
            sumFd += fd;
            ++numShards;
        }
    }

    std::lock_guard lock(mutex_);
    fileSz_ = sumSz;
    fdRequired_ = sumFd;
    avgShardFileSz_ = (numShards == 0 ? fileSz_ : fileSz_ / numShards);

    if (!canAdd_)
        return;

    if (auto const count = numHistoricalShards(lock);
        count >= maxHistoricalShards_)
    {
        if (maxHistoricalShards_)
        {
            // In order to avoid excessive output, don't produce
            // this warning if the server isn't configured to
            // store historical shards.
            JLOG(j_.warn()) << "maximum number of historical shards reached";
        }

        canAdd_ = false;
    }
    else if (!sufficientStorage(
                 maxHistoricalShards_ - count,
                 PathDesignation::historical,
                 lock))
    {
        JLOG(j_.warn())
            << "maximum shard store size exceeds available storage space";

        canAdd_ = false;
    }
}

void
DatabaseShardImp::updateStatus(std::lock_guard<std::mutex> const&)
{
    if (!shards_.empty())
    {
        RangeSet<std::uint32_t> rs;
        for (auto const& e : shards_)
            if (e.second->getState() == Shard::final)
                rs.insert(e.second->index());
        status_ = to_string(rs);
    }
    else
        status_.clear();
}

bool
DatabaseShardImp::sufficientStorage(
    std::uint32_t numShards,
    PathDesignation pathDesignation,
    std::lock_guard<std::mutex> const&) const
{
    try
    {
        std::vector<std::uint64_t> capacities;

        if (pathDesignation == PathDesignation::historical &&
            !historicalPaths_.empty())
        {
            capacities.reserve(historicalPaths_.size());

            for (auto const& path : historicalPaths_)
            {
                // Get the available storage for each historical path
                auto const availableSpace =
                    boost::filesystem::space(path).available;

                capacities.push_back(availableSpace);
            }
        }
        else
        {
            // Get the available storage for the main shard path
            capacities.push_back(boost::filesystem::space(dir_).available);
        }

        for (std::uint64_t const capacity : capacities)
        {
            // Leverage all the historical shard paths to
            // see if collectively they can fit the specified
            // number of shards. For this to work properly,
            // each historical path must correspond to a separate
            // physical device or filesystem.

            auto const shardCap = capacity / avgShardFileSz_;
            if (numShards <= shardCap)
                return true;

            numShards -= shardCap;
        }
    }
    catch (std::exception const& e)
    {
        JLOG(j_.fatal()) << "Exception caught in function " << __func__
                         << ". Error: " << e.what();
        return false;
    }

    return false;
}

bool
DatabaseShardImp::setStoredInShard(
    std::shared_ptr<Shard>& shard,
    std::shared_ptr<Ledger const> const& ledger)
{
    if (!shard->setLedgerStored(ledger))
    {
        // Invalid or corrupt shard, remove it
        removeFailedShard(shard);
        return false;
    }

    if (shard->getState() == Shard::complete)
    {
        std::lock_guard lock(mutex_);
        if (auto const it{shards_.find(shard->index())}; it != shards_.end())
        {
            if (shard->index() == acquireIndex_)
                acquireIndex_ = 0;

            finalizeShard(it->second, false, boost::none);
        }
        else
        {
            JLOG(j_.debug())
                << "shard " << shard->index() << " is no longer being acquired";
        }
    }

    setFileStats();
    return true;
}

void
DatabaseShardImp::removeFailedShard(std::shared_ptr<Shard>& shard)
{
    {
        std::lock_guard lock(mutex_);

        if (shard->index() == acquireIndex_)
            acquireIndex_ = 0;

        if (shard->index() == latestShardIndex_)
            latestShardIndex_ = boost::none;

        if (shard->index() == secondLatestShardIndex_)
            secondLatestShardIndex_ = boost::none;

        if ((shards_.erase(shard->index()) > 0) &&
            shard->getState() == Shard::final)
        {
            updateStatus(lock);
        }
    }

    shard->removeOnDestroy();

    // Reset the shared_ptr to invoke the shard's
    // destructor and remove it from the server
    shard.reset();
    setFileStats();
}

std::uint32_t
DatabaseShardImp::shardBoundaryIndex() const
{
    auto const validIndex = app_.getLedgerMaster().getValidLedgerIndex();

    if (validIndex < earliestLedgerSeq())
        return 0;

    // Shards with an index earlier than the recent shard boundary index
    // are considered historical. The three shards at or later than
    // this index consist of the two most recently validated shards
    // and the shard still in the process of being built by live
    // transactions.
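    // (For example, if the validated ledger falls in shard N, the boundary
    // is N - 1: shards N - 1 and N remain recent, while shards N - 2 and
    // earlier are historical.)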
    return seqToShardIndex(validIndex) - 1;
}

std::uint32_t
DatabaseShardImp::numHistoricalShards(
    std::lock_guard<std::mutex> const& lock) const
{
    auto const boundaryIndex{shardBoundaryIndex()};
    return std::count_if(
        shards_.begin(), shards_.end(), [boundaryIndex](auto const& entry) {
            return entry.first < boundaryIndex;
        });
}

void
DatabaseShardImp::relocateOutdatedShards(
    std::lock_guard<std::mutex> const& lock)
{
    if (auto& cur = latestShardIndex_, &prev = secondLatestShardIndex_;
        cur || prev)
    {
        auto const latestShardIndex =
            seqToShardIndex(app_.getLedgerMaster().getValidLedgerIndex());

        auto const separateHistoricalPath = !historicalPaths_.empty();

        auto const removeShard =
            [this](std::uint32_t const shardIndex) -> void {
            canAdd_ = false;

            if (auto it = shards_.find(shardIndex); it != shards_.end())
            {
                if (it->second)
                    removeFailedShard(it->second);
                else
                {
                    JLOG(j_.warn()) << "can't find shard to remove";
                }
            }
            else
            {
                JLOG(j_.warn()) << "can't find shard to remove";
            }
        };

        auto const keepShard =
            [this, &lock, removeShard, separateHistoricalPath](
                std::uint32_t const shardIndex) -> bool {
            if (numHistoricalShards(lock) >= maxHistoricalShards_)
            {
                JLOG(j_.error())
                    << "maximum number of historical shards reached";

                removeShard(shardIndex);
                return false;
            }
            if (separateHistoricalPath &&
                !sufficientStorage(1, PathDesignation::historical, lock))
            {
                JLOG(j_.error()) << "insufficient storage space available";

                removeShard(shardIndex);
                return false;
            }

            return true;
        };

        // Move a shard from the main shard path to a historical shard
        // path by renaming its directory and creating a new shard instance.
        auto const moveShard = [this,
                                &lock](std::uint32_t const shardIndex) -> void {
            auto const dst = chooseHistoricalPath(lock);

            if (auto it = shards_.find(shardIndex); it != shards_.end())
            {
                auto& shard{it->second};

                // Close any open file descriptors before moving the shard
                // directory. Don't call removeOnDestroy since that would
                // attempt to close the fds after the directory has been moved.
                if (!shard->tryClose())
                {
                    JLOG(j_.warn())
                        << "can't close shard to move to historical path";
                    return;
                }

                try
                {
                    // Move the shard directory to the new path
                    boost::filesystem::rename(
                        shard->getDir().string(),
                        dst / std::to_string(shardIndex));
                }
                catch (...)
                {
                    JLOG(j_.error()) << "shard " << shardIndex
                                     << " failed to move to historical storage";
                    return;
                }

                // Create a shard instance at the new location
                shard =
                    std::make_shared<Shard>(app_, *this, shardIndex, dst, j_);

                // Open the new shard
                if (!shard->init(scheduler_, *ctx_))
                {
                    JLOG(j_.error()) << "shard " << shardIndex
                                     << " failed to open in historical storage";
                    shard->removeOnDestroy();
                    shard.reset();
                }
            }
            else
            {
                JLOG(j_.warn())
                    << "can't find shard to move to historical path";
            }
        };

        // See if either of the recent shards needs to be updated
        bool const curNotSynched =
            latestShardIndex_ && *latestShardIndex_ != latestShardIndex;
        bool const prevNotSynched = secondLatestShardIndex_ &&
            *secondLatestShardIndex_ != latestShardIndex - 1;

        // A new shard has been published. Move outdated
        // shards to historical storage as needed
        if (curNotSynched || prevNotSynched)
        {
            if (prev)
            {
                // Move the formerly second latest shard to historical storage
                if (keepShard(*prev) && separateHistoricalPath)
                {
                    moveShard(*prev);
                }

                prev = boost::none;
            }

            if (cur)
            {
                // The formerly latest shard is now the second latest
                if (cur == latestShardIndex - 1)
                {
                    prev = cur;
                }

                // The formerly latest shard is no longer a 'recent' shard
                else
                {
                    // Move the formerly latest shard to historical storage
                    if (keepShard(*cur) && separateHistoricalPath)
                    {
                        moveShard(*cur);
                    }
                }

                cur = boost::none;
            }
        }
    }
}

auto
DatabaseShardImp::prepareForNewShard(
    std::uint32_t shardIndex,
    std::uint32_t numHistoricalShards,
    std::lock_guard<std::mutex> const& lock) -> boost::optional<PathDesignation>
{
    // Any shard earlier than the two most recent shards is a historical shard
    auto const boundaryIndex{shardBoundaryIndex()};
    auto const isHistoricalShard = shardIndex < boundaryIndex;

    auto const designation = isHistoricalShard && !historicalPaths_.empty()
        ? PathDesignation::historical
        : PathDesignation::none;

    // Check shard count and available storage space
    if (isHistoricalShard && numHistoricalShards >= maxHistoricalShards_)
    {
        JLOG(j_.error()) << "maximum number of historical shards reached";
        canAdd_ = false;
        return boost::none;
    }
    if (!sufficientStorage(1, designation, lock))
    {
        JLOG(j_.error()) << "insufficient storage space available";
        canAdd_ = false;
        return boost::none;
    }

    return designation;
}

boost::filesystem::path
DatabaseShardImp::chooseHistoricalPath(std::lock_guard<std::mutex> const&) const
{
    // If not configured with separate historical paths,
    // use the main path (dir_) by default.
    if (historicalPaths_.empty())
        return dir_;

    boost::filesystem::path historicalShardPath;
    std::vector<boost::filesystem::path> potentialPaths;

    for (boost::filesystem::path const& path : historicalPaths_)
    {
        if (boost::filesystem::space(path).available >= avgShardFileSz_)
            potentialPaths.push_back(path);
    }

    if (potentialPaths.empty())
    {
        JLOG(j_.error()) << "failed to select a historical shard path";
        return "";
    }

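    // Pick one of the eligible paths uniformly at random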
    std::sample(
        potentialPaths.begin(),
        potentialPaths.end(),
        &historicalShardPath,
        1,
        default_prng());

    return historicalShardPath;
}

bool
DatabaseShardImp::checkHistoricalPaths() const
{
#if BOOST_OS_LINUX
    // Each historical shard path must correspond
    // to a directory on a distinct device or file system.
    // Currently, this constraint is enforced only on Linux.
    std::unordered_map<decltype(statvfs::f_fsid), std::vector<std::string>>
        filesystemIDs;

    for (auto const& path : historicalPaths_)
    {
        struct statvfs buffer;
        if (statvfs(path.c_str(), &buffer))
        {
            JLOG(j_.error())
                << "failed to acquire stats for 'historical_shard_path': "
                << path;
            return false;
        }

        filesystemIDs[buffer.f_fsid].push_back(path.string());
    }

    bool ret = true;
    for (auto const& entry : filesystemIDs)
    {
        // Check to see if any of the paths are stored on the same file system
        if (entry.second.size() > 1)
        {
            // Two or more historical storage paths
            // correspond to the same file system.
            JLOG(j_.error())
                << "The following paths correspond to the same filesystem: "
                << boost::algorithm::join(entry.second, ", ")
                << ". Each configured historical storage path should"
                   " be on a unique device or filesystem.";

            ret = false;
        }
    }

    return ret;

#else
    // The requirement that each historical storage path
    // corresponds to a distinct device or file system is
    // enforced only on Linux, so on other platforms
    // keep track of the available capacities for each
    // path. Issue a warning if we suspect any of the paths
    // may violate this requirement.

    // Map byte counts to each path that shares that byte count.
    std::unordered_map<std::uintmax_t, std::vector<std::string>>
        uniqueCapacities(historicalPaths_.size());

    for (auto const& path : historicalPaths_)
        uniqueCapacities[boost::filesystem::space(path).available].push_back(
            path.string());

    for (auto const& entry : uniqueCapacities)
    {
        // Check to see if any paths have the same amount of available bytes.
        if (entry.second.size() > 1)
        {
            // Two or more historical storage paths may
            // correspond to the same device or file system.
            JLOG(j_.warn())
                << "Each of the following paths have " << entry.first
                << " bytes free, and may be located on the same device"
                   " or file system: "
                << boost::algorithm::join(entry.second, ", ")
                << ". Each configured historical storage path should"
                   " be on a unique device or file system.";
        }
    }
#endif

    return true;
}

//------------------------------------------------------------------------------

std::unique_ptr<DatabaseShard>
make_ShardStore(
    Application& app,
    Stoppable& parent,
    Scheduler& scheduler,
    int readThreads,
    beast::Journal j)
{
    // The shard store is optional. Future changes will require it.
    Section const& section{
        app.config().section(ConfigSection::shardDatabase())};
    if (section.empty())
        return nullptr;

    return std::make_unique<DatabaseShardImp>(
        app, parent, "ShardStore", scheduler, readThreads, j);
}

} // namespace NodeStore
} // namespace ripple
ripple::NodeStore::DatabaseShardImp::store
void store(NodeObjectType type, Blob &&data, uint256 const &hash, std::uint32_t ledgerSeq) override
Store the object.
Definition: DatabaseShardImp.cpp:987
ripple::NodeStore::DatabaseShardImp::PathDesignation::none
@ none
beast::Journal::error
Stream error() const
Definition: Journal.h:333
ripple::NodeStore::DatabaseShardImp::checkHistoricalPaths
bool checkHistoricalPaths() const
Definition: DatabaseShardImp.cpp:1809
std::set::erase
T erase(T... args)
ripple::NodeStore::DatabaseShardImp::initConfig
bool initConfig(std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:1110
ripple::ConfigSection
Definition: ConfigSections.h:28
ripple::NodeStore::DatabaseShardImp::latestShardIndex_
boost::optional< std::uint32_t > latestShardIndex_
Definition: DatabaseShardImp.h:232
beast::Journal
A generic endpoint for log messages.
Definition: Journal.h:58
ripple::NodeStore::seqToShardIndex
constexpr std::uint32_t seqToShardIndex(std::uint32_t ledgerSeq, std::uint32_t ledgersPerShard=DatabaseShard::ledgersPerShardDefault)
Definition: DatabaseShard.h:183
std::uint32_t
ripple::NodeStore::Database::earliestLedgerSeq
std::uint32_t earliestLedgerSeq() const
Definition: Database.h:228
ripple::NodeStore::DatabaseShardImp::setFileStats
void setFileStats()
Definition: DatabaseShardImp.cpp:1371
ripple::NodeStore::DatabaseShardImp::acquireIndex_
std::uint32_t acquireIndex_
Definition: DatabaseShardImp.h:185
ripple::Overlay::foreach
void foreach(Function f) const
Visit every active peer.
Definition: Overlay.h:178
ripple::NodeStore::Scheduler
Scheduling for asynchronous backend activity.
Definition: ripple/nodestore/Scheduler.h:60
ripple::NodeStore::Database::fetchNodeObject
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq=0, FetchType fetchType=FetchType::synchronous)
Fetch a node object.
Definition: Database.cpp:142
std::transform
T transform(T... args)
ripple::NodeStore::Database::storeStats
void storeStats(std::uint64_t count, std::uint64_t sz)
Definition: Database.h:245
ripple::NodeStore::DatabaseShardImp::preparedIndexes_
std::set< std::uint32_t > preparedIndexes_
Definition: DatabaseShardImp.h:182
ripple::NodeStore::DatabaseShardImp::init
bool init() override
Initialize the database.
Definition: DatabaseShardImp.cpp:67
ripple::NodeStore::Shard::final
static constexpr State final
Definition: Shard.h:64
std::weak_ptr
STL class.
ripple::Serializer
Definition: Serializer.h:39
ripple::NodeStore::Database::stopReadThreads
void stopReadThreads()
Definition: Database.cpp:75
ripple::NodeStore::DatabaseShardImp::historicalPaths_
std::vector< boost::filesystem::path > historicalPaths_
Definition: DatabaseShardImp.h:203
ripple::NodeStore::DatabaseShardImp::parent_
Stoppable & parent_
Definition: DatabaseShardImp.h:168
ripple::NodeStore::DatabaseShardImp::findAcquireIndex
boost::optional< std::uint32_t > findAcquireIndex(std::uint32_t validLedgerSeq, std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:1217
ripple::NodeStore::DatabaseShardImp::importMarker_
static constexpr auto importMarker_
Definition: DatabaseShardImp.h:223
std::vector::emplace_back
T emplace_back(T... args)
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::Serializer::addBitString
int addBitString(base_uint< Bits, Tag > const &v)
Definition: Serializer.h:97
ripple::Application::getNodeStore
virtual NodeStore::Database & getNodeStore()=0
ripple::NodeStore::DatabaseShardImp::import
void import(Database &source) override
Import the application local node store.
Definition: DatabaseShardImp.cpp:734
ripple::NodeStore::DatabaseShardImp::maxHistoricalShards_
std::uint32_t maxHistoricalShards_
Definition: DatabaseShardImp.h:200
ripple::Application::getShardFamily
virtual Family * getShardFamily()=0
ripple::NodeStore::Database::j_
const beast::Journal j_
Definition: Database.h:234
ripple::NodeStore::DatabaseShardImp::fetchLedger
std::shared_ptr< Ledger > fetchLedger(uint256 const &hash, std::uint32_t ledgerSeq) override
Fetch a ledger from the shard store.
Definition: DatabaseShardImp.cpp:538
std::vector::begin
T begin(T... args)
std
STL namespace.
ripple::NodeStore::DatabaseShardImp::numHistoricalShards
std::uint32_t numHistoricalShards(std::lock_guard< std::mutex > const &lock) const
Definition: DatabaseShardImp.cpp:1581
ripple::LedgerMaster::getCurrentLedgerIndex
LedgerIndex getCurrentLedgerIndex()
Definition: LedgerMaster.cpp:206
ripple::NodeStore::DatabaseShardImp::relocateOutdatedShards
void relocateOutdatedShards(std::lock_guard< std::mutex > const &lock)
Definition: DatabaseShardImp.cpp:1592
ripple::NodeStore::DatabaseShardImp::onChildrenStopped
void onChildrenStopped() override
Override called when all children have stopped.
Definition: DatabaseShardImp.cpp:700
ripple::Application::overlay
virtual Overlay & overlay()=0
ripple::NodeStore::DatabaseShardImp::shardBoundaryIndex
std::uint32_t shardBoundaryIndex() const
Definition: DatabaseShardImp.cpp:1565
std::count_if
T count_if(T... args)
std::vector::empty
T empty(T... args)
ripple::NodeStore::DatabaseShardImp::prepareShards
bool prepareShards(std::vector< std::uint32_t > const &shardIndexes) override
Prepare one or more shard indexes to be imported into the database.
Definition: DatabaseShardImp.cpp:296
std::unique
T unique(T... args)
beast::Journal::debug
Stream debug() const
Definition: Journal.h:315
ripple::getHashesByIndex
bool getHashesByIndex(std::uint32_t ledgerIndex, uint256 &ledgerHash, uint256 &parentHash, Application &app)
Definition: Ledger.cpp:1291
ripple::NodeStore::DatabaseShardImp::setStoredInShard
bool setStoredInShard(std::shared_ptr< Shard > &shard, std::shared_ptr< Ledger const > const &ledger)
Definition: DatabaseShardImp.cpp:1502
ripple::NodeStore::DatabaseShardImp::canAdd_
bool canAdd_
Definition: DatabaseShardImp.h:191
ripple::Serializer::add32
int add32(std::uint32_t i)
Definition: Serializer.cpp:38
std::vector::end
T end(T... args)
ripple::NodeStore::Database::scheduler_
Scheduler & scheduler_
Definition: Database.h:235
ripple::RangeSet
boost::icl::interval_set< T, std::less, ClosedInterval< T > > RangeSet
A set of closed intervals over the domain T.
Definition: RangeSet.h:69
std::max
T max(T... args)
ripple::NodeStore::DatabaseShardImp::shards_
std::unordered_map< std::uint32_t, std::shared_ptr< Shard > > shards_
Definition: DatabaseShardImp.h:179
ripple::NodeStore::DatabaseShardImp::DatabaseShardImp
DatabaseShardImp()=delete
std::unique_ptr
STL class.
std::unordered_map
STL class.
ripple::NodeStore::DatabaseShardImp::prepareForNewShard
boost::optional< PathDesignation > prepareForNewShard(std::uint32_t shardIndex, std::uint32_t numHistoricalShards, std::lock_guard< std::mutex > const &lock)
Definition: DatabaseShardImp.cpp:1745
ripple::ConfigSection::nodeDatabase
static std::string nodeDatabase()
Definition: ConfigSections.h:33
std::exception::what
T what(T... args)
ripple::NodeStore::DatabaseShardImp::onStop
void onStop() override
Override called when the stop notification is issued.
Definition: DatabaseShardImp.cpp:687
ripple::NodeStore::DatabaseShardImp::ctx_
std::unique_ptr< nudb::context > ctx_
Definition: DatabaseShardImp.h:173
ripple::BasicConfig::section
Section & section(std::string const &name)
Returns the section with the given name.
Definition: BasicConfig.cpp:138
ripple::Stoppable::isStopping
bool isStopping() const
Returns true if the stoppable should stop.
Definition: Stoppable.cpp:54
ripple::NodeStore::DatabaseShardImp::backendName_
std::string backendName_
Definition: DatabaseShardImp.h:197