// rippled
// DatabaseShardImp.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2017 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/app/ledger/InboundLedgers.h>
21 #include <ripple/app/ledger/LedgerMaster.h>
22 #include <ripple/app/misc/NetworkOPs.h>
23 #include <ripple/basics/ByteUtilities.h>
24 #include <ripple/basics/chrono.h>
25 #include <ripple/basics/random.h>
26 #include <ripple/core/ConfigSections.h>
27 #include <ripple/nodestore/DummyScheduler.h>
28 #include <ripple/nodestore/impl/DatabaseShardImp.h>
29 #include <ripple/overlay/Overlay.h>
30 #include <ripple/overlay/predicates.h>
31 #include <ripple/protocol/HashPrefix.h>
32 
33 #include <boost/algorithm/string/predicate.hpp>
34 
35 #if BOOST_OS_LINUX
36 #include <sys/statvfs.h>
37 #endif
38 
39 namespace ripple {
40 namespace NodeStore {
41 
43  Application& app,
44  Stoppable& parent,
45  std::string const& name,
46  Scheduler& scheduler,
47  int readThreads,
49  : DatabaseShard(
50  name,
51  parent,
52  scheduler,
53  readThreads,
54  app.config().section(ConfigSection::shardDatabase()),
55  j)
56  , app_(app)
57  , parent_(parent)
58  , taskQueue_(std::make_unique<TaskQueue>(*this))
59  , earliestShardIndex_(seqToShardIndex(earliestLedgerSeq()))
60  , avgShardFileSz_(ledgersPerShard_ * kilobytes(192ull))
61  , openFinalLimit_(
62  app.config().getValueFor(SizedItem::openFinalLimit, boost::none))
63 {
64 }
65 
66 bool
68 {
69  {
70  std::lock_guard lock(mutex_);
71  if (init_)
72  {
73  JLOG(j_.error()) << "already initialized";
74  return false;
75  }
76 
77  if (!initConfig(lock))
78  {
79  JLOG(j_.error()) << "invalid configuration file settings";
80  return false;
81  }
82 
83  try
84  {
85  using namespace boost::filesystem;
86 
87  // Consolidate the main storage path and all historical paths
88  std::vector<path> paths{dir_};
89  paths.insert(
90  paths.end(), historicalPaths_.begin(), historicalPaths_.end());
91 
92  for (auto const& path : paths)
93  {
94  if (exists(path))
95  {
96  if (!is_directory(path))
97  {
98  JLOG(j_.error()) << path << " must be a directory";
99  return false;
100  }
101  }
102  else if (!create_directories(path))
103  {
104  JLOG(j_.error())
105  << "failed to create path: " + path.string();
106  return false;
107  }
108  }
109 
111  {
112  // Check historical paths for duplicated file systems
113  if (!checkHistoricalPaths())
114  return false;
115  }
116 
117  ctx_ = std::make_unique<nudb::context>();
118  ctx_->start();
119 
120  // Find shards
121  std::uint32_t openFinals{0};
122  for (auto const& path : paths)
123  {
124  for (auto const& it : directory_iterator(path))
125  {
126  // Ignore files
127  if (!is_directory(it))
128  continue;
129 
130  // Ignore nonnumerical directory names
131  auto const shardDir{it.path()};
132  auto dirName{shardDir.stem().string()};
133  if (!std::all_of(
134  dirName.begin(), dirName.end(), [](auto c) {
135  return ::isdigit(static_cast<unsigned char>(c));
136  }))
137  {
138  continue;
139  }
140 
141  // Ignore values below the earliest shard index
142  auto const shardIndex{std::stoul(dirName)};
143  if (shardIndex < earliestShardIndex())
144  {
145  JLOG(j_.debug())
146  << "shard " << shardIndex
147  << " ignored, comes before earliest shard index "
148  << earliestShardIndex();
149  continue;
150  }
151 
152  // Check if a previous import failed
153  if (is_regular_file(shardDir / importMarker_))
154  {
155  JLOG(j_.warn())
156  << "shard " << shardIndex
157  << " previously failed import, removing";
158  remove_all(shardDir);
159  continue;
160  }
161 
162  auto shard{std::make_shared<Shard>(
163  app_, *this, shardIndex, shardDir.parent_path(), j_)};
164  if (!shard->init(scheduler_, *ctx_))
165  {
166  // Remove corrupted or legacy shard
167  shard->removeOnDestroy();
168  JLOG(j_.warn())
169  << "shard " << shardIndex << " removed, "
170  << (shard->isLegacy() ? "legacy" : "corrupted")
171  << " shard";
172  continue;
173  }
174 
175  switch (shard->getState())
176  {
177  case Shard::final:
178  if (++openFinals > openFinalLimit_)
179  shard->tryClose();
180  shards_.emplace(shardIndex, std::move(shard));
181  break;
182 
183  case Shard::complete:
185  shards_.emplace(shardIndex, std::move(shard))
186  .first->second,
187  true,
188  boost::none);
189  break;
190 
191  case Shard::acquire:
192  if (acquireIndex_ != 0)
193  {
194  JLOG(j_.error())
195  << "more than one shard being acquired";
196  return false;
197  }
198 
199  shards_.emplace(shardIndex, std::move(shard));
200  acquireIndex_ = shardIndex;
201  break;
202 
203  default:
204  JLOG(j_.error())
205  << "shard " << shardIndex << " invalid state";
206  return false;
207  }
208  }
209  }
210  }
211  catch (std::exception const& e)
212  {
213  JLOG(j_.fatal()) << "Exception caught in function " << __func__
214  << ". Error: " << e.what();
215  return false;
216  }
217 
218  updateStatus(lock);
220  init_ = true;
221  }
222 
223  setFileStats();
224  return true;
225 }
226 
227 boost::optional<std::uint32_t>
229 {
230  boost::optional<std::uint32_t> shardIndex;
231 
232  {
233  std::lock_guard lock(mutex_);
234  assert(init_);
235 
236  if (acquireIndex_ != 0)
237  {
238  if (auto const it{shards_.find(acquireIndex_)}; it != shards_.end())
239  return it->second->prepare();
240 
241  // Should never get here
242  assert(false);
243  return boost::none;
244  }
245 
246  if (!canAdd_)
247  return boost::none;
248 
249  shardIndex = findAcquireIndex(validLedgerSeq, lock);
250  }
251 
252  if (!shardIndex)
253  {
254  JLOG(j_.debug()) << "no new shards to add";
255  {
256  std::lock_guard lock(mutex_);
257  canAdd_ = false;
258  }
259  return boost::none;
260  }
261 
262  auto const pathDesignation = [this, shardIndex = *shardIndex]() {
263  std::lock_guard lock(mutex_);
264  return prepareForNewShard(shardIndex, numHistoricalShards(lock), lock);
265  }();
266 
267  if (!pathDesignation)
268  return boost::none;
269 
270  auto const needsHistoricalPath =
271  *pathDesignation == PathDesignation::historical;
272 
273  auto shard = [this, shardIndex, needsHistoricalPath] {
274  std::lock_guard lock(mutex_);
275  return std::make_unique<Shard>(
276  app_,
277  *this,
278  *shardIndex,
279  (needsHistoricalPath ? chooseHistoricalPath(lock) : ""),
280  j_);
281  }();
282 
283  if (!shard->init(scheduler_, *ctx_))
284  return boost::none;
285 
286  auto const ledgerSeq{shard->prepare()};
287  {
288  std::lock_guard lock(mutex_);
289  shards_.emplace(*shardIndex, std::move(shard));
290  acquireIndex_ = *shardIndex;
291  }
292  return ledgerSeq;
293 }
294 
295 bool
297 {
298  auto fail = [j = j_, &shardIndexes](
299  std::string const& msg,
300  boost::optional<std::uint32_t> shardIndex = boost::none) {
301  auto multipleIndexPrequel = [&shardIndexes] {
302  std::vector<std::string> indexesAsString(shardIndexes.size());
304  shardIndexes.begin(),
305  shardIndexes.end(),
306  indexesAsString.begin(),
307  [](uint32_t const index) { return std::to_string(index); });
308 
309  return std::string("shard") +
310  (shardIndexes.size() > 1 ? "s " : " ") +
311  boost::algorithm::join(indexesAsString, ", ");
312  };
313 
314  std::string const prequel = shardIndex
315  ? "shard " + std::to_string(*shardIndex)
316  : multipleIndexPrequel();
317 
318  JLOG(j.error()) << prequel << " " << msg;
319  return false;
320  };
321 
322  std::lock_guard lock(mutex_);
323  assert(init_);
324 
325  if (!canAdd_)
326  return fail("cannot be stored at this time");
327 
328  auto historicalShardsToPrepare = 0;
329 
330  for (auto const shardIndex : shardIndexes)
331  {
332  if (shardIndex < earliestShardIndex())
333  {
334  return fail(
335  "comes before earliest shard index " +
337  shardIndex);
338  }
339 
340  // If we are synced to the network, check if the shard index is
341  // greater or equal to the current or validated shard index.
342  auto seqCheck = [&](std::uint32_t ledgerSeq) {
343  if (ledgerSeq >= earliestLedgerSeq() &&
344  shardIndex >= seqToShardIndex(ledgerSeq))
345  {
346  return fail("invalid index", shardIndex);
347  }
348  return true;
349  };
350  if (!seqCheck(app_.getLedgerMaster().getValidLedgerIndex() + 1) ||
352  {
353  return fail("invalid index", shardIndex);
354  }
355 
356  if (shards_.find(shardIndex) != shards_.end())
357  return fail("is already stored", shardIndex);
358 
359  if (preparedIndexes_.find(shardIndex) != preparedIndexes_.end())
360  return fail("is already queued for import", shardIndex);
361 
362  // Any shard earlier than the two most recent shards
363  // is a historical shard
364  if (shardIndex < shardBoundaryIndex())
365  ++historicalShardsToPrepare;
366  }
367 
368  auto const numHistShards = numHistoricalShards(lock);
369 
370  // Check shard count and available storage space
371  if (numHistShards + historicalShardsToPrepare > maxHistoricalShards_)
372  return fail("maximum number of historical shards reached");
373 
374  if (historicalShardsToPrepare)
375  {
376  // Check available storage space for historical shards
377  if (!sufficientStorage(
378  historicalShardsToPrepare, PathDesignation::historical, lock))
379  return fail("insufficient storage space available");
380  }
381 
382  if (auto const recentShardsToPrepare =
383  shardIndexes.size() - historicalShardsToPrepare;
384  recentShardsToPrepare)
385  {
386  // Check available storage space for recent shards
387  if (!sufficientStorage(
388  recentShardsToPrepare, PathDesignation::none, lock))
389  return fail("insufficient storage space available");
390  }
391 
392  for (auto const shardIndex : shardIndexes)
393  {
394  auto const prepareSuccessful =
395  preparedIndexes_.emplace(shardIndex).second;
396 
397  (void)prepareSuccessful;
398  assert(prepareSuccessful);
399  }
400 
401  return true;
402 }
403 
404 void
406 {
407  std::lock_guard lock(mutex_);
408  assert(init_);
409 
410  preparedIndexes_.erase(shardIndex);
411 }
412 
415 {
417  {
418  std::lock_guard lock(mutex_);
419  assert(init_);
420 
421  for (auto const& shardIndex : preparedIndexes_)
422  rs.insert(shardIndex);
423  }
424 
425  if (rs.empty())
426  return {};
427 
428  return to_string(rs);
429 };
430 
431 bool
433  std::uint32_t shardIndex,
434  boost::filesystem::path const& srcDir)
435 {
436  auto fail = [&](std::string const& msg,
437  std::lock_guard<std::mutex> const& lock) {
438  JLOG(j_.error()) << "shard " << shardIndex << " " << msg;
439 
440  // Remove the failed import shard index so it can be retried
441  preparedIndexes_.erase(shardIndex);
442  return false;
443  };
444 
445  using namespace boost::filesystem;
446  try
447  {
448  if (!is_directory(srcDir) || is_empty(srcDir))
449  {
450  return fail(
451  "invalid source directory " + srcDir.string(),
453  }
454  }
455  catch (std::exception const& e)
456  {
457  return fail(
458  std::string(". Exception caught in function ") + __func__ +
459  ". Error: " + e.what(),
461  }
462 
463  auto const expectedHash{app_.getLedgerMaster().walkHashBySeq(
465  if (!expectedHash)
466  return fail("expected hash not found", std::lock_guard(mutex_));
467 
468  path dstDir;
469  {
470  std::lock_guard lock(mutex_);
471  if (shards_.find(shardIndex) != shards_.end())
472  return fail("already exists", lock);
473 
474  // Check shard was prepared for import
475  if (preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
476  return fail("was not prepared for import", lock);
477 
478  auto const pathDesignation{
479  prepareForNewShard(shardIndex, numHistoricalShards(lock), lock)};
480  if (!pathDesignation)
481  return fail("failed to import", lock);
482 
483  if (*pathDesignation == PathDesignation::historical)
484  dstDir = chooseHistoricalPath(lock);
485  else
486  dstDir = dir_;
487  }
488  dstDir /= std::to_string(shardIndex);
489 
490  auto renameDir = [&](path const& src, path const& dst) {
491  try
492  {
493  rename(src, dst);
494  }
495  catch (std::exception const& e)
496  {
497  return fail(
498  std::string(". Exception caught in function ") + __func__ +
499  ". Error: " + e.what(),
501  }
502  return true;
503  };
504 
505  // Rename source directory to the shard database directory
506  if (!renameDir(srcDir, dstDir))
507  return false;
508 
509  // Create the new shard
510  auto shard{std::make_unique<Shard>(
511  app_, *this, shardIndex, dstDir.parent_path(), j_)};
512 
513  if (!shard->init(scheduler_, *ctx_) || shard->getState() != Shard::complete)
514  {
515  shard.reset();
516  renameDir(dstDir, srcDir);
517  return fail("failed to import", std::lock_guard(mutex_));
518  }
519 
520  auto const [it, inserted] = [&]() {
521  std::lock_guard lock(mutex_);
522  preparedIndexes_.erase(shardIndex);
523  return shards_.emplace(shardIndex, std::move(shard));
524  }();
525 
526  if (!inserted)
527  {
528  shard.reset();
529  renameDir(dstDir, srcDir);
530  return fail("failed to import", std::lock_guard(mutex_));
531  }
532 
533  finalizeShard(it->second, true, expectedHash);
534  return true;
535 }
536 
539 {
540  auto const shardIndex{seqToShardIndex(ledgerSeq)};
541  {
543  {
544  std::lock_guard lock(mutex_);
545  assert(init_);
546 
547  auto const it{shards_.find(shardIndex)};
548  if (it == shards_.end())
549  return nullptr;
550  shard = it->second;
551  }
552 
553  // Ledger must be stored in a final or acquiring shard
554  switch (shard->getState())
555  {
556  case Shard::final:
557  break;
558  case Shard::acquire:
559  if (shard->containsLedger(ledgerSeq))
560  break;
561  [[fallthrough]];
562  default:
563  return nullptr;
564  }
565  }
566 
567  auto const nodeObject{Database::fetchNodeObject(hash, ledgerSeq)};
568  if (!nodeObject)
569  return nullptr;
570 
571  auto fail = [&](std::string const& msg) -> std::shared_ptr<Ledger> {
572  JLOG(j_.error()) << "shard " << shardIndex << " " << msg;
573  return nullptr;
574  };
575 
576  auto ledger{std::make_shared<Ledger>(
577  deserializePrefixedHeader(makeSlice(nodeObject->getData())),
578  app_.config(),
579  *app_.getShardFamily())};
580 
581  if (ledger->info().seq != ledgerSeq)
582  {
583  return fail(
584  "encountered invalid ledger sequence " + std::to_string(ledgerSeq));
585  }
586  if (ledger->info().hash != hash)
587  {
588  return fail(
589  "encountered invalid ledger hash " + to_string(hash) +
590  " on sequence " + std::to_string(ledgerSeq));
591  }
592 
593  ledger->setFull();
594  if (!ledger->stateMap().fetchRoot(
595  SHAMapHash{ledger->info().accountHash}, nullptr))
596  {
597  return fail(
598  "is missing root STATE node on hash " + to_string(hash) +
599  " on sequence " + std::to_string(ledgerSeq));
600  }
601 
602  if (ledger->info().txHash.isNonZero())
603  {
604  if (!ledger->txMap().fetchRoot(
605  SHAMapHash{ledger->info().txHash}, nullptr))
606  {
607  return fail(
608  "is missing root TXN node on hash " + to_string(hash) +
609  " on sequence " + std::to_string(ledgerSeq));
610  }
611  }
612  return ledger;
613 }
614 
615 void
617 {
618  auto const ledgerSeq{ledger->info().seq};
619  if (ledger->info().hash.isZero())
620  {
621  JLOG(j_.error()) << "zero ledger hash for ledger sequence "
622  << ledgerSeq;
623  return;
624  }
625  if (ledger->info().accountHash.isZero())
626  {
627  JLOG(j_.error()) << "zero account hash for ledger sequence "
628  << ledgerSeq;
629  return;
630  }
631  if (ledger->stateMap().getHash().isNonZero() &&
632  !ledger->stateMap().isValid())
633  {
634  JLOG(j_.error()) << "invalid state map for ledger sequence "
635  << ledgerSeq;
636  return;
637  }
638  if (ledger->info().txHash.isNonZero() && !ledger->txMap().isValid())
639  {
640  JLOG(j_.error()) << "invalid transaction map for ledger sequence "
641  << ledgerSeq;
642  return;
643  }
644 
645  auto const shardIndex{seqToShardIndex(ledgerSeq)};
647  {
648  std::lock_guard lock(mutex_);
649  assert(init_);
650 
651  if (shardIndex != acquireIndex_)
652  {
653  JLOG(j_.trace())
654  << "shard " << shardIndex << " is not being acquired";
655  return;
656  }
657 
658  auto const it{shards_.find(shardIndex)};
659  if (it == shards_.end())
660  {
661  JLOG(j_.error())
662  << "shard " << shardIndex << " is not being acquired";
663  return;
664  }
665  shard = it->second;
666  }
667 
668  if (shard->containsLedger(ledgerSeq))
669  {
670  JLOG(j_.trace()) << "shard " << shardIndex << " ledger already stored";
671  return;
672  }
673 
674  setStoredInShard(shard, ledger);
675 }
676 
679 {
680  std::lock_guard lock(mutex_);
681  assert(init_);
682 
683  return status_;
684 }
685 
686 void
688 {
689  // Stop read threads in base before data members are destroyed
690  stopReadThreads();
691 
692  std::lock_guard lock(mutex_);
693 
694  // Notify shards to stop
695  for (auto const& e : shards_)
696  e.second->stop();
697 }
698 
699 void
701 {
703  {
704  std::lock_guard lock(mutex_);
705 
706  shards.reserve(shards_.size());
707  for (auto const& e : shards_)
708  shards.push_back(e.second);
709  shards_.clear();
710  }
711 
712  // All shards should be expired at this point
713  for (auto const& e : shards)
714  {
715  if (!e.expired())
716  {
717  std::string shardIndex;
718  if (auto const shard{e.lock()}; shard)
719  shardIndex = std::to_string(shard->index());
720 
721  JLOG(j_.warn()) << " shard " << shardIndex << " unexpired";
722  }
723  }
724 
725  stopped();
726 }
727 
728 void
730 {
731  {
732  std::lock_guard lock(mutex_);
733  assert(init_);
734 
735  // Only the application local node store can be imported
736  if (&source != &app_.getNodeStore())
737  {
738  assert(false);
739  JLOG(j_.error()) << "invalid source database";
740  return;
741  }
742 
743  std::uint32_t earliestIndex;
744  std::uint32_t latestIndex;
745  {
746  auto loadLedger = [&](bool ascendSort =
747  true) -> boost::optional<std::uint32_t> {
749  std::uint32_t ledgerSeq;
750  std::tie(ledger, ledgerSeq, std::ignore) = loadLedgerHelper(
751  "WHERE LedgerSeq >= " +
753  " order by LedgerSeq " + (ascendSort ? "asc" : "desc") +
754  " limit 1",
755  app_,
756  false);
757  if (!ledger || ledgerSeq == 0)
758  {
759  JLOG(j_.error()) << "no suitable ledgers were found in"
760  " the SQLite database to import";
761  return boost::none;
762  }
763  return ledgerSeq;
764  };
765 
766  // Find earliest ledger sequence stored
767  auto ledgerSeq{loadLedger()};
768  if (!ledgerSeq)
769  return;
770  earliestIndex = seqToShardIndex(*ledgerSeq);
771 
772  // Consider only complete shards
773  if (ledgerSeq != firstLedgerSeq(earliestIndex))
774  ++earliestIndex;
775 
776  // Find last ledger sequence stored
777  ledgerSeq = loadLedger(false);
778  if (!ledgerSeq)
779  return;
780  latestIndex = seqToShardIndex(*ledgerSeq);
781 
782  // Consider only complete shards
783  if (ledgerSeq != lastLedgerSeq(latestIndex))
784  --latestIndex;
785 
786  if (latestIndex < earliestIndex)
787  {
788  JLOG(j_.error()) << "no suitable ledgers were found in"
789  " the SQLite database to import";
790  return;
791  }
792  }
793 
794  auto numHistShards = this->numHistoricalShards(lock);
795 
796  // Import the shards
797  for (std::uint32_t shardIndex = earliestIndex;
798  shardIndex <= latestIndex;
799  ++shardIndex)
800  {
801  auto const pathDesignation =
802  prepareForNewShard(shardIndex, numHistShards, lock);
803 
804  if (!pathDesignation)
805  break;
806 
807  auto const needsHistoricalPath =
808  *pathDesignation == PathDesignation::historical;
809 
810  // Skip if being acquired
811  if (shardIndex == acquireIndex_)
812  {
813  JLOG(j_.debug())
814  << "shard " << shardIndex << " already being acquired";
815  continue;
816  }
817 
818  // Skip if being imported
819  if (preparedIndexes_.find(shardIndex) != preparedIndexes_.end())
820  {
821  JLOG(j_.debug())
822  << "shard " << shardIndex << " already being imported";
823  continue;
824  }
825 
826  // Skip if stored
827  if (shards_.find(shardIndex) != shards_.end())
828  {
829  JLOG(j_.debug()) << "shard " << shardIndex << " already stored";
830  continue;
831  }
832 
833  // Verify SQLite ledgers are in the node store
834  {
835  auto const firstSeq{firstLedgerSeq(shardIndex)};
836  auto const lastSeq{
837  std::max(firstSeq, lastLedgerSeq(shardIndex))};
838  auto const numLedgers{
839  shardIndex == earliestShardIndex() ? lastSeq - firstSeq + 1
840  : ledgersPerShard_};
841  auto ledgerHashes{getHashesByIndex(firstSeq, lastSeq, app_)};
842  if (ledgerHashes.size() != numLedgers)
843  continue;
844 
845  bool valid{true};
846  for (std::uint32_t n = firstSeq; n <= lastSeq; n += 256)
847  {
848  if (!source.fetchNodeObject(ledgerHashes[n].first, n))
849  {
850  JLOG(j_.warn()) << "SQLite ledger sequence " << n
851  << " mismatches node store";
852  valid = false;
853  break;
854  }
855  }
856  if (!valid)
857  continue;
858  }
859 
860  auto const path =
861  needsHistoricalPath ? chooseHistoricalPath(lock) : dir_;
862 
863  // Create the new shard
864  auto shard{
865  std::make_unique<Shard>(app_, *this, shardIndex, path, j_)};
866  if (!shard->init(scheduler_, *ctx_))
867  continue;
868 
869  // Create a marker file to signify an import in progress
870  auto const shardDir{path / std::to_string(shardIndex)};
871  auto const markerFile{shardDir / importMarker_};
872  {
873  std::ofstream ofs{markerFile.string()};
874  if (!ofs.is_open())
875  {
876  JLOG(j_.error()) << "shard " << shardIndex
877  << " failed to create temp marker file";
878  shard->removeOnDestroy();
879  continue;
880  }
881  ofs.close();
882  }
883 
884  // Copy the ledgers from node store
885  std::shared_ptr<Ledger> recentStored;
886  boost::optional<uint256> lastLedgerHash;
887 
888  while (auto const ledgerSeq = shard->prepare())
889  {
890  auto ledger{loadByIndex(*ledgerSeq, app_, false)};
891  if (!ledger || ledger->info().seq != ledgerSeq)
892  break;
893 
894  auto const result{shard->storeLedger(ledger, recentStored)};
895  storeStats(result.count, result.size);
896  if (result.error)
897  break;
898 
899  if (!shard->setLedgerStored(ledger))
900  break;
901 
902  if (!lastLedgerHash && ledgerSeq == lastLedgerSeq(shardIndex))
903  lastLedgerHash = ledger->info().hash;
904 
905  recentStored = std::move(ledger);
906  }
907 
908  using namespace boost::filesystem;
909  bool success{false};
910  if (lastLedgerHash && shard->getState() == Shard::complete)
911  {
912  // Store shard final key
913  Serializer s;
915  s.add32(firstLedgerSeq(shardIndex));
916  s.add32(lastLedgerSeq(shardIndex));
917  s.addBitString(*lastLedgerHash);
918  auto const nodeObject{NodeObject::createObject(
919  hotUNKNOWN, std::move(s.modData()), Shard::finalKey)};
920 
921  if (shard->storeNodeObject(nodeObject))
922  {
923  try
924  {
925  // The import process is complete and the
926  // marker file is no longer required
927  remove_all(markerFile);
928 
929  JLOG(j_.debug()) << "shard " << shardIndex
930  << " was successfully imported";
932  shards_.emplace(shardIndex, std::move(shard))
933  .first->second,
934  true,
935  boost::none);
936  success = true;
937 
938  if (shardIndex < shardBoundaryIndex())
939  ++numHistShards;
940  }
941  catch (std::exception const& e)
942  {
943  JLOG(j_.fatal()) << "shard index " << shardIndex
944  << ". Exception caught in function "
945  << __func__ << ". Error: " << e.what();
946  }
947  }
948  }
949 
950  if (!success)
951  {
952  JLOG(j_.error())
953  << "shard " << shardIndex << " failed to import";
954  shard->removeOnDestroy();
955  }
956  }
957 
958  updateStatus(lock);
959  }
960 
961  setFileStats();
962 }
963 
966 {
968  {
969  std::lock_guard lock(mutex_);
970  assert(init_);
971 
972  auto const it{shards_.find(acquireIndex_)};
973  if (it == shards_.end())
974  return 0;
975  shard = it->second;
976  }
977 
978  return shard->getWriteLoad();
979 }
980 
981 void
983  NodeObjectType type,
984  Blob&& data,
985  uint256 const& hash,
986  std::uint32_t ledgerSeq)
987 {
988  auto const shardIndex{seqToShardIndex(ledgerSeq)};
990  {
991  std::lock_guard lock(mutex_);
992  if (shardIndex != acquireIndex_)
993  {
994  JLOG(j_.trace())
995  << "shard " << shardIndex << " is not being acquired";
996  return;
997  }
998 
999  auto const it{shards_.find(shardIndex)};
1000  if (it == shards_.end())
1001  {
1002  JLOG(j_.error())
1003  << "shard " << shardIndex << " is not being acquired";
1004  return;
1005  }
1006  shard = it->second;
1007  }
1008 
1009  auto const nodeObject{
1010  NodeObject::createObject(type, std::move(data), hash)};
1011  if (shard->storeNodeObject(nodeObject))
1012  storeStats(1, nodeObject->getData().size());
1013 }
1014 
1015 bool
1017 {
1018  auto const ledgerSeq{srcLedger->info().seq};
1019  auto const shardIndex{seqToShardIndex(ledgerSeq)};
1020  std::shared_ptr<Shard> shard;
1021  {
1022  std::lock_guard lock(mutex_);
1023  assert(init_);
1024 
1025  if (shardIndex != acquireIndex_)
1026  {
1027  JLOG(j_.trace())
1028  << "shard " << shardIndex << " is not being acquired";
1029  return false;
1030  }
1031 
1032  auto const it{shards_.find(shardIndex)};
1033  if (it == shards_.end())
1034  {
1035  JLOG(j_.error())
1036  << "shard " << shardIndex << " is not being acquired";
1037  return false;
1038  }
1039  shard = it->second;
1040  }
1041 
1042  auto const result{shard->storeLedger(srcLedger, nullptr)};
1043  storeStats(result.count, result.size);
1044  if (result.error || result.count == 0 || result.size == 0)
1045  return false;
1046 
1047  return setStoredInShard(shard, srcLedger);
1048 }
1049 
1050 void
1052 {
1054  {
1055  std::lock_guard lock(mutex_);
1056  assert(init_);
1057 
1058  shards.reserve(shards_.size());
1059  for (auto const& e : shards_)
1060  shards.push_back(e.second);
1061  }
1062 
1064  openFinals.reserve(openFinalLimit_);
1065 
1066  for (auto const& e : shards)
1067  {
1068  if (auto const shard{e.lock()}; shard && shard->isOpen())
1069  {
1070  shard->sweep();
1071 
1072  if (shard->getState() == Shard::final)
1073  openFinals.emplace_back(std::move(shard));
1074  }
1075  }
1076 
1077  if (openFinals.size() > openFinalLimit_)
1078  {
1079  JLOG(j_.trace()) << "Open shards exceed configured limit of "
1080  << openFinalLimit_ << " by "
1081  << (openFinals.size() - openFinalLimit_);
1082 
1083  // Try to close enough shards to be within the limit.
1084  // Sort ascending on last use so the oldest are removed first.
1085  std::sort(
1086  openFinals.begin(),
1087  openFinals.end(),
1088  [&](std::shared_ptr<Shard> const& lhsShard,
1089  std::shared_ptr<Shard> const& rhsShard) {
1090  return lhsShard->getLastUse() < rhsShard->getLastUse();
1091  });
1092 
1093  for (auto it{openFinals.cbegin()};
1094  it != openFinals.cend() && openFinals.size() > openFinalLimit_;)
1095  {
1096  if ((*it)->tryClose())
1097  it = openFinals.erase(it);
1098  else
1099  ++it;
1100  }
1101  }
1102 }
1103 
1104 bool
1106 {
1107  auto fail = [j = j_](std::string const& msg) {
1108  JLOG(j.error()) << "[" << ConfigSection::shardDatabase() << "] " << msg;
1109  return false;
1110  };
1111 
1112  Config const& config{app_.config()};
1113  Section const& section{config.section(ConfigSection::shardDatabase())};
1114 
1115  {
1116  // The earliest ledger sequence defaults to XRP_LEDGER_EARLIEST_SEQ.
1117  // A custom earliest ledger sequence can be set through the
1118  // configuration file using the 'earliest_seq' field under the
1119  // 'node_db' and 'shard_db' stanzas. If specified, this field must
1120  // have a value greater than zero and be equally assigned in
1121  // both stanzas.
1122 
1123  std::uint32_t shardDBEarliestSeq{0};
1124  get_if_exists<std::uint32_t>(
1125  section, "earliest_seq", shardDBEarliestSeq);
1126 
1127  std::uint32_t nodeDBEarliestSeq{0};
1128  get_if_exists<std::uint32_t>(
1129  config.section(ConfigSection::nodeDatabase()),
1130  "earliest_seq",
1131  nodeDBEarliestSeq);
1132 
1133  if (shardDBEarliestSeq != nodeDBEarliestSeq)
1134  {
1135  return fail(
1136  "and [" + ConfigSection::nodeDatabase() +
1137  "] define different 'earliest_seq' values");
1138  }
1139  }
1140 
1141  using namespace boost::filesystem;
1142  if (!get_if_exists<path>(section, "path", dir_))
1143  return fail("'path' missing");
1144 
1145  {
1146  get_if_exists(section, "max_historical_shards", maxHistoricalShards_);
1147 
1148  Section const& historicalShardPaths =
1149  config.section(SECTION_HISTORICAL_SHARD_PATHS);
1150 
1151  auto values = historicalShardPaths.values();
1152 
1153  std::sort(values.begin(), values.end());
1154  values.erase(std::unique(values.begin(), values.end()), values.end());
1155 
1156  for (auto const& s : values)
1157  {
1158  auto const dir = path(s);
1159  if (dir_ == dir)
1160  {
1161  return fail(
1162  "the 'path' cannot also be in the "
1163  "'historical_shard_path' section");
1164  }
1165 
1167  }
1168  }
1169 
1170  if (section.exists("ledgers_per_shard"))
1171  {
1172  // To be set only in standalone for testing
1173  if (!config.standalone())
1174  return fail("'ledgers_per_shard' only honored in stand alone");
1175 
1176  ledgersPerShard_ = get<std::uint32_t>(section, "ledgers_per_shard");
1177  if (ledgersPerShard_ == 0 || ledgersPerShard_ % 256 != 0)
1178  return fail("'ledgers_per_shard' must be a multiple of 256");
1179 
1182  }
1183 
1184  // NuDB is the default and only supported permanent storage backend
1185  backendName_ = get<std::string>(section, "type", "nudb");
1186  if (!boost::iequals(backendName_, "NuDB"))
1187  return fail("'type' value unsupported");
1188 
1189  return true;
1190 }
1191 
1194  uint256 const& hash,
1195  std::uint32_t ledgerSeq,
1196  FetchReport& fetchReport)
1197 {
1198  auto const shardIndex{seqToShardIndex(ledgerSeq)};
1199  std::shared_ptr<Shard> shard;
1200  {
1201  std::lock_guard lock(mutex_);
1202  auto const it{shards_.find(shardIndex)};
1203  if (it == shards_.end())
1204  return nullptr;
1205  shard = it->second;
1206  }
1207 
1208  return shard->fetchNodeObject(hash, fetchReport);
1209 }
1210 
// Choose a shard index to acquire next, given the network's current
// validated ledger sequence. Returns boost::none when nothing is eligible
// (validated ledger too early, or all shards already held).
// NOTE(review): lines 1212 (qualified name), 1214 (the lock parameter —
// per the index: std::lock_guard<std::mutex> const&) and 1236 (the
// declaration of `available`, evidently a std::vector of shard indexes)
// are missing from this extract — confirm against the repository.
1211 boost::optional<std::uint32_t>
1213  std::uint32_t validLedgerSeq,
1215 {
1216  if (validLedgerSeq < earliestLedgerSeq())
1217  return boost::none;
1218 
// The highest complete shard index: step back one if the validated
// ledger does not land exactly on a shard boundary.
1219  auto const maxShardIndex{[this, validLedgerSeq]() {
1220  auto shardIndex{seqToShardIndex(validLedgerSeq)};
1221  if (validLedgerSeq != lastLedgerSeq(shardIndex))
1222  --shardIndex;
1223  return shardIndex;
1224  }()};
1225  auto const maxNumShards{maxShardIndex - earliestShardIndex() + 1};
1226 
1227  // Check if the shard store has all shards
1228  if (shards_.size() >= maxNumShards)
1229  return boost::none;
1230 
// Strategy 1: when the index space is small or mostly occupied,
// enumerate every index not already stored or prepared, then pick one
// uniformly at random.
1231  if (maxShardIndex < 1024 ||
1232  static_cast<float>(shards_.size()) / maxNumShards > 0.5f)
1233  {
1234  // Small or mostly full index space to sample
1235  // Find the available indexes and select one at random
1237  available.reserve(maxNumShards - shards_.size());
1238 
1239  for (auto shardIndex = earliestShardIndex();
1240  shardIndex <= maxShardIndex;
1241  ++shardIndex)
1242  {
1243  if (shards_.find(shardIndex) == shards_.end() &&
1244  preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
1245  {
1246  available.push_back(shardIndex);
1247  }
1248  }
1249 
1250  if (available.empty())
1251  return boost::none;
1252 
1253  if (available.size() == 1)
1254  return available.front();
1255 
1256  return available[rand_int(
1257  0u, static_cast<std::uint32_t>(available.size() - 1))];
1258  }
1259 
// Strategy 2: sparse occupancy — rejection-sample random indexes.
// With occupancy <= 50%, 40 tries virtually always succeed.
1260  // Large, sparse index space to sample
1261  // Keep choosing indexes at random until an available one is found
1262  // chances of running more than 30 times is less than 1 in a billion
1263  for (int i = 0; i < 40; ++i)
1264  {
1265  auto const shardIndex{rand_int(earliestShardIndex(), maxShardIndex)};
1266  if (shards_.find(shardIndex) == shards_.end() &&
1267  preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
1268  {
1269  return shardIndex;
1270  }
1271  }
1272 
// Statistically (near-)unreachable; treat as a logic error in debug.
1273  assert(false);
1274  return boost::none;
1275 }
1276 
// Queue an asynchronous task that finalizes a complete shard: verifies /
// finalizes its contents, updates bookkeeping (status, recent-shard
// indexes), refreshes file stats, and announces the shard to peers.
// NOTE(review): line 1278 (qualified name DatabaseShardImp::finalizeShard)
// is missing from this extract.
1277 void
1279  std::shared_ptr<Shard>& shard,
1280  bool writeSQLite,
1281  boost::optional<uint256> const& expectedHash)
1282 {
// Capture the shard weakly so a shard removed before the task runs is
// simply skipped rather than kept alive by the queue.
1283  taskQueue_->addTask([this,
1284  wptr = std::weak_ptr<Shard>(shard),
1285  writeSQLite,
1286  expectedHash]() {
1287  if (isStopping())
1288  return;
1289 
1290  auto shard{wptr.lock()};
1291  if (!shard)
1292  {
1293  JLOG(j_.debug()) << "Shard removed before being finalized";
1294  return;
1295  }
1296 
1297  if (!shard->finalize(writeSQLite, expectedHash))
1298  {
1299  if (isStopping())
1300  return;
1301 
1302  // Invalid or corrupt shard, remove it
1303  removeFailedShard(shard);
1304  return;
1305  }
1306 
1307  if (isStopping())
1308  return;
1309 
1310  {
// Compute the boundary before taking the mutex (it reads the
// validated ledger index from LedgerMaster, not shared state here).
1311  auto const boundaryIndex{shardBoundaryIndex()};
1312 
1313  std::lock_guard lock(mutex_);
1314  updateStatus(lock);
1315 
1316  if (shard->index() < boundaryIndex)
1317  {
1318  // This is a historical shard
1319  if (!historicalPaths_.empty() &&
1320  shard->getDir().parent_path() == dir_)
1321  {
1322  // Shard wasn't placed at a separate historical path
1323  JLOG(j_.warn()) << "shard " << shard->index()
1324  << " is not stored at a historical path";
1325  }
1326  }
1327 
1328  else
1329  {
1330  // Not a historical shard. Shift recent shards if necessary
1331  relocateOutdatedShards(lock);
1332  assert(!boundaryIndex || shard->index() - boundaryIndex <= 1);
1333 
// NOTE(review): lines 1335-1336 are missing from this extract —
// presumably the ternary arms selecting secondLatestShardIndex_
// vs. latestShardIndex_; confirm against the repository.
1334  auto& recentShard = shard->index() == boundaryIndex
1337 
1338  // Set the appropriate recent shard index
1339  recentShard = shard->index();
1340 
1341  if (shard->getDir().parent_path() != dir_)
1342  {
1343  JLOG(j_.warn()) << "shard " << shard->index()
1344  << " is not stored at the path";
1345  }
1346  }
1347  }
1348 
1349  setFileStats();
1350 
1351  // Update peers with new shard index
// NOTE(review): line 1353 is missing — per the cross-reference it
// likely checks getOperatingMode() against OperatingMode::DISCONNECTED;
// confirm against the repository.
1352  if (!app_.config().standalone() &&
1354  {
1355  protocol::TMPeerShardInfo message;
1356  PublicKey const& publicKey{app_.nodeIdentity().first};
1357  message.set_nodepubkey(publicKey.data(), publicKey.size());
1358  message.set_shardindexes(std::to_string(shard->index()));
// Broadcast the newly finalized shard index to all connected peers.
1359  app_.overlay().foreach(send_always(std::make_shared<Message>(
1360  message, protocol::mtPEER_SHARD_INFO)));
1361  }
1362  });
1363 }
1364 
// Recompute aggregate file statistics (total size, required file
// descriptors, average shard size) across all shards, and disable further
// acquisition (canAdd_) if shard-count or storage limits are hit.
// NOTE(review): lines 1366 (qualified name), 1368 (declaration of
// `shards` — evidently a std::vector<std::weak_ptr<Shard>> given e.lock()
// below), 1404 (a guard inside the count-limit branch; the comment at
// 1406-1408 suggests it tests maxHistoricalShards_ != 0) and 1416 (an
// argument to sufficientStorage, presumably PathDesignation::historical)
// are missing from this extract — confirm against the repository.
1365 void
1367 {
1369  {
// Snapshot weak references under the lock so per-shard file queries
// below can run without holding the mutex.
1370  std::lock_guard lock(mutex_);
1371  if (shards_.empty())
1372  return;
1373 
1374  shards.reserve(shards_.size());
1375  for (auto const& e : shards_)
1376  shards.push_back(e.second);
1377  }
1378 
1379  std::uint64_t sumSz{0};
1380  std::uint32_t sumFd{0};
1381  std::uint32_t numShards{0};
1382  for (auto const& e : shards)
1383  {
// Skip shards destroyed since the snapshot was taken.
1384  if (auto const shard{e.lock()}; shard)
1385  {
1386  auto const [sz, fd] = shard->getFileInfo();
1387  sumSz += sz;
1388  sumFd += fd;
1389  ++numShards;
1390  }
1391  }
1392 
// Publish the totals and re-evaluate acquisition limits under the lock.
1393  std::lock_guard lock(mutex_);
1394  fileSz_ = sumSz;
1395  fdRequired_ = sumFd;
1396  avgShardFileSz_ = (numShards == 0 ? fileSz_ : fileSz_ / numShards);
1397 
1398  if (!canAdd_)
1399  return;
1400 
1401  if (auto const count = numHistoricalShards(lock);
1402  count >= maxHistoricalShards_)
1403  {
1405  {
1406  // In order to avoid excessive output, don't produce
1407  // this warning if the server isn't configured to
1408  // store historical shards.
1409  JLOG(j_.warn()) << "maximum number of historical shards reached";
1410  }
1411 
1412  canAdd_ = false;
1413  }
1414  else if (!sufficientStorage(
1415  maxHistoricalShards_ - count,
1417  lock))
1418  {
1419  JLOG(j_.warn())
1420  << "maximum shard store size exceeds available storage space";
1421 
1422  canAdd_ = false;
1423  }
1424 }
1425 
// Rebuild status_, the string describing which shards are in the final
// state. Caller must hold mutex_ (enforced by the lock_guard parameter).
// NOTE(review): lines 1427 (qualified name; per the index the parameter
// is std::lock_guard<std::mutex> const&) and 1431 (declaration of `rs`,
// evidently a range-set of shard indexes given insert/to_string below)
// are missing from this extract — confirm against the repository.
1426 void
1428 {
1429  if (!shards_.empty())
1430  {
// Collect the indexes of all finalized shards into the range set.
1432  for (auto const& e : shards_)
1433  if (e.second->getState() == Shard::final)
1434  rs.insert(e.second->index());
1435  status_ = to_string(rs);
1436  }
1437  else
1438  status_.clear();
1439 }
1440 
// Report whether the configured storage (main path, or the set of
// historical paths) has room for `numShards` more shards of average size.
// Returns false on any filesystem error.
// NOTE(review): lines 1442 (qualified name) and 1452 (the second operand
// of the && at 1451 — presumably !historicalPaths_.empty()) are missing
// from this extract — confirm against the repository.
1441 bool
1443  std::uint32_t numShards,
1444  PathDesignation pathDesignation,
1445  std::lock_guard<std::mutex> const&) const
1446 {
1447  try
1448  {
1449  std::vector<std::uint64_t> capacities;
1450 
1451  if (pathDesignation == PathDesignation::historical &&
1453  {
1454  capacities.reserve(historicalPaths_.size());
1455 
1456  for (auto const& path : historicalPaths_)
1457  {
1458  // Get the available storage for each historical path
1459  auto const availableSpace =
1460  boost::filesystem::space(path).available;
1461 
1462  capacities.push_back(availableSpace);
1463  }
1464  }
1465  else
1466  {
1467  // Get the available storage for the main shard path
1468  capacities.push_back(boost::filesystem::space(dir_).available);
1469  }
1470 
1471  for (std::uint64_t const capacity : capacities)
1472  {
1473  // Leverage all the historical shard paths to
1474  // see if collectively they can fit the specified
1475  // number of shards. For this to work properly,
1476  // each historical path must correspond to a separate
1477  // physical device or filesystem.
1478 
// How many average-sized shards this one capacity can absorb;
// subtract and continue to the next path if not yet satisfied.
1479  auto const shardCap = capacity / avgShardFileSz_;
1480  if (numShards <= shardCap)
1481  return true;
1482 
1483  numShards -= shardCap;
1484  }
1485  }
1486  catch (std::exception const& e)
1487  {
// boost::filesystem::space throws on failure; treat any error as
// "insufficient" rather than propagating.
1488  JLOG(j_.fatal()) << "Exception caught in function " << __func__
1489  << ". Error: " << e.what();
1490  return false;
1491  }
1492 
1493  return false;
1494 }
1495 
// Record that `ledger` has been stored in `shard`. If that completes the
// shard, kick off finalization. Returns false (after removing the shard)
// when the shard rejects the ledger as invalid/corrupt.
// NOTE(review): line 1497 (qualified name DatabaseShardImp::setStoredInShard)
// is missing from this extract.
1496 bool
1498  std::shared_ptr<Shard>& shard,
1499  std::shared_ptr<Ledger const> const& ledger)
1500 {
1501  if (!shard->setLedgerStored(ledger))
1502  {
1503  // Invalid or corrupt shard, remove it
1504  removeFailedShard(shard);
1505  return false;
1506  }
1507 
1508  if (shard->getState() == Shard::complete)
1509  {
1510  std::lock_guard lock(mutex_);
1511  if (auto const it{shards_.find(shard->index())}; it != shards_.end())
1512  {
// This shard is no longer being acquired; clear the marker.
1513  if (shard->index() == acquireIndex_)
1514  acquireIndex_ = 0;
1515 
1516  finalizeShard(it->second, false, boost::none);
1517  }
1518  else
1519  {
1520  JLOG(j_.debug())
1521  << "shard " << shard->index() << " is no longer being acquired";
1522  }
1523  }
1524 
// Refresh aggregate size/fd statistics after the store.
1525  setFileStats();
1526  return true;
1527 }
1528 
// Remove an invalid or corrupt shard: clear any bookkeeping referring to
// its index, erase it from shards_, mark its directory for deletion on
// destruction, and destroy it.
// NOTE(review): line 1530 (qualified name DatabaseShardImp::removeFailedShard)
// is missing from this extract.
1529 void
1531 {
1532  {
1533  std::lock_guard lock(mutex_);
1534 
// Clear every piece of state that may reference this shard index.
1535  if (shard->index() == acquireIndex_)
1536  acquireIndex_ = 0;
1537 
1538  if (shard->index() == latestShardIndex_)
1539  latestShardIndex_ = boost::none;
1540 
1541  if (shard->index() == secondLatestShardIndex_)
1542  secondLatestShardIndex_ = boost::none;
1543 
// Only a finalized shard is reflected in status_, so only then does
// its removal require a status rebuild.
1544  if ((shards_.erase(shard->index()) > 0) &&
1545  shard->getState() == Shard::final)
1546  {
1547  updateStatus(lock);
1548  }
1549  }
1550 
1551  shard->removeOnDestroy();
1552 
1553  // Reset the shared_ptr to invoke the shard's
1554  // destructor and remove it from the server
1555  shard.reset();
1556  setFileStats();
1557 }
1558 
// NOTE(review): lines 1559-1560 (return type and qualified name; per the
// cross-reference: std::uint32_t DatabaseShardImp::shardBoundaryIndex()
// const) are missing from this extract.
// Returns the first shard index considered "recent"; shards below this
// index are historical. Returns 0 when no shard-range ledger has been
// validated yet.
1561 {
1562  auto const validIndex = app_.getLedgerMaster().getValidLedgerIndex();
1563 
1564  if (validIndex < earliestLedgerSeq())
1565  return 0;
1566 
1567  // Shards with an index earlier than the recent shard boundary index
1568  // are considered historical. The three shards at or later than
1569  // this index consist of the two most recently validated shards
1570  // and the shard still in the process of being built by live
1571  // transactions.
1572  return seqToShardIndex(validIndex) - 1;
1573 }
1574 
// Count the stored shards whose index is below the recent-shard boundary,
// i.e. the historical shards. Caller must hold mutex_.
// NOTE(review): lines 1575-1576 (return type and qualified name; per the
// cross-reference: std::uint32_t DatabaseShardImp::numHistoricalShards(...)
// const) are missing from this extract.
1577  std::lock_guard<std::mutex> const& lock) const
1578 {
1579  auto const boundaryIndex{shardBoundaryIndex()};
1580  return std::count_if(
1581  shards_.begin(), shards_.end(), [boundaryIndex](auto const& entry) {
1582  return entry.first < boundaryIndex;
1583  });
1584 }
1585 
// When a new shard boundary is published, demote the previously "recent"
// shards: shards that are no longer among the two most recent are either
// removed (if limits are exceeded) or moved to a historical storage path.
// Caller must hold mutex_.
// NOTE(review): several original lines are missing from this extract and
// should be confirmed against the repository: 1587 (qualified name),
// 1594 (initializer of latestShardIndex — presumably derived from
// seqToShardIndex of the current ledger index), 1620 (the guard in
// keepShard — its error message suggests a numHistoricalShards /
// maxHistoricalShards_ check), and 1629 (the second operand of the && at
// 1628 — its error message suggests a sufficientStorage call).
1586 void
1588  std::lock_guard<std::mutex> const& lock)
1589 {
// Only act if at least one recent-shard slot is occupied.
1590  if (auto& cur = latestShardIndex_, &prev = secondLatestShardIndex_;
1591  cur || prev)
1592  {
1593  auto const latestShardIndex =
1595 
1596  auto const separateHistoricalPath = !historicalPaths_.empty();
1597 
// Helper: remove a shard that cannot be kept; also stops any further
// acquisition since limits were hit.
1598  auto const removeShard =
1599  [this](std::uint32_t const shardIndex) -> void {
1600  canAdd_ = false;
1601 
1602  if (auto it = shards_.find(shardIndex); it != shards_.end())
1603  {
1604  if (it->second)
1605  removeFailedShard(it->second);
1606  else
1607  {
1608  JLOG(j_.warn()) << "can't find shard to remove";
1609  }
1610  }
1611  else
1612  {
1613  JLOG(j_.warn()) << "can't find shard to remove";
1614  }
1615  };
1616 
// Helper: decide whether a demoted shard may be kept as historical;
// removes it (and returns false) when count or storage limits forbid it.
1617  auto const keepShard =
1618  [this, &lock, removeShard, separateHistoricalPath](
1619  std::uint32_t const shardIndex) -> bool {
1621  {
1622  JLOG(j_.error())
1623  << "maximum number of historical shards reached";
1624 
1625  removeShard(shardIndex);
1626  return false;
1627  }
1628  if (separateHistoricalPath &&
1630  {
1631  JLOG(j_.error()) << "insufficient storage space available";
1632 
1633  removeShard(shardIndex);
1634  return false;
1635  }
1636 
1637  return true;
1638  };
1639 
1640  // Move a shard from the main shard path to a historical shard
1641  // path by copying the contents, and creating a new shard.
1642  auto const moveShard = [this,
1643  &lock](std::uint32_t const shardIndex) -> void {
1644  auto const dst = chooseHistoricalPath(lock);
1645 
1646  if (auto it = shards_.find(shardIndex); it != shards_.end())
1647  {
1648  auto& shard{it->second};
1649 
1650  // Close any open file descriptors before moving the shard
1651  // directory. Don't call removeOnDestroy since that would
1652  // attempt to close the fds after the directory has been moved.
1653  if (!shard->tryClose())
1654  {
1655  JLOG(j_.warn())
1656  << "can't close shard to move to historical path";
1657  return;
1658  }
1659 
1660  try
1661  {
1662  // Move the shard directory to the new path
1663  boost::filesystem::rename(
1664  shard->getDir().string(),
1665  dst / std::to_string(shardIndex));
1666  }
1667  catch (...)
1668  {
// On rename failure the shard is left in place; the original
// shard object was already closed, so just log and bail.
1669  JLOG(j_.error()) << "shard " << shardIndex
1670  << " failed to move to historical storage";
1671  return;
1672  }
1673 
1674  // Create a shard instance at the new location
1675  shard =
1676  std::make_shared<Shard>(app_, *this, shardIndex, dst, j_);
1677 
1678  // Open the new shard
1679  if (!shard->init(scheduler_, *ctx_))
1680  {
1681  JLOG(j_.error()) << "shard " << shardIndex
1682  << " failed to open in historical storage";
1683  shard->removeOnDestroy();
1684  shard.reset();
1685  }
1686  }
1687  else
1688  {
1689  JLOG(j_.warn())
1690  << "can't find shard to move to historical path";
1691  }
1692  };
1693 
1694  // See if either of the recent shards needs to be updated
1695  bool const curNotSynched =
1696  latestShardIndex_ && *latestShardIndex_ != latestShardIndex;
1697  bool const prevNotSynched = secondLatestShardIndex_ &&
1698  *secondLatestShardIndex_ != latestShardIndex - 1;
1699 
1700  // A new shard has been published. Move outdated
1701  // shards to historical storage as needed
1702  if (curNotSynched || prevNotSynched)
1703  {
1704  if (prev)
1705  {
1706  // Move the formerly second latest shard to historical storage
1707  if (keepShard(*prev) && separateHistoricalPath)
1708  {
1709  moveShard(*prev);
1710  }
1711 
1712  prev = boost::none;
1713  }
1714 
1715  if (cur)
1716  {
1717  // The formerly latest shard is now the second latest
1718  if (cur == latestShardIndex - 1)
1719  {
1720  prev = cur;
1721  }
1722 
1723  // The formerly latest shard is no longer a 'recent' shard
1724  else
1725  {
1726  // Move the formerly latest shard to historical storage
1727  if (keepShard(*cur) && separateHistoricalPath)
1728  {
1729  moveShard(*cur);
1730  }
1731  }
1732 
1733  cur = boost::none;
1734  }
1735  }
1736  }
1737 }
1738 
// Decide where a new shard at `shardIndex` should be placed (main path or
// a historical path) and verify count/storage limits allow it. Returns
// boost::none — and permanently disables acquisition — on limit failure.
// NOTE(review): lines 1740 (qualified name), 1742 (a parameter — the body
// reads `numHistoricalShards` at 1754, so presumably
// std::uint32_t numHistoricalShards,) and 1750-1751 (the ternary arms,
// presumably PathDesignation::historical : PathDesignation::none) are
// missing from this extract — confirm against the repository.
1739 auto
1741  std::uint32_t shardIndex,
1743  std::lock_guard<std::mutex> const& lock) -> boost::optional<PathDesignation>
1744 {
1745  // Any shard earlier than the two most recent shards is a historical shard
1746  auto const boundaryIndex{shardBoundaryIndex()};
1747  auto const isHistoricalShard = shardIndex < boundaryIndex;
1748 
1749  auto const designation = isHistoricalShard && !historicalPaths_.empty()
1752 
1753  // Check shard count and available storage space
1754  if (isHistoricalShard && numHistoricalShards >= maxHistoricalShards_)
1755  {
1756  JLOG(j_.error()) << "maximum number of historical shards reached";
1757  canAdd_ = false;
1758  return boost::none;
1759  }
1760  if (!sufficientStorage(1, designation, lock))
1761  {
1762  JLOG(j_.error()) << "insufficient storage space available";
1763  canAdd_ = false;
1764  return boost::none;
1765  }
1766 
1767  return designation;
1768 }
1769 
// Pick a storage path for a historical shard: the main directory when no
// historical paths are configured, otherwise a uniformly random choice
// among configured paths with at least one average shard's worth of free
// space. Returns an empty path when none qualifies.
// NOTE(review): line 1771 (qualified name
// DatabaseShardImp::chooseHistoricalPath) is missing from this extract.
1770 boost::filesystem::path
1772 {
1773  // If not configured with separate historical paths,
1774  // use the main path (dir_) by default.
1775  if (historicalPaths_.empty())
1776  return dir_;
1777 
1778  boost::filesystem::path historicalShardPath;
1779  std::vector<boost::filesystem::path> potentialPaths;
1780 
// Keep only paths with enough free space for an average-sized shard.
1781  for (boost::filesystem::path const& path : historicalPaths_)
1782  {
1783  if (boost::filesystem::space(path).available >= avgShardFileSz_)
1784  potentialPaths.push_back(path);
1785  }
1786 
1787  if (potentialPaths.empty())
1788  {
1789  JLOG(j_.error()) << "failed to select a historical shard path";
1790  return "";
1791  }
1792 
// std::sample with n=1 draws one path uniformly at random.
1793  std::sample(
1794  potentialPaths.begin(),
1795  potentialPaths.end(),
1796  &historicalShardPath,
1797  1,
1798  default_prng());
1799 
1800  return historicalShardPath;
1801 }
1802 
// Validate the configured historical shard paths. On Linux each path must
// live on a distinct filesystem (checked via statvfs f_fsid); elsewhere
// only a heuristic warning is issued when paths report identical free
// space. Returns false only for hard failures on Linux.
// NOTE(review): lines 1804 (qualified name), 1810-1811 (declaration of
// `filesystemIDs` — evidently a map from fsid to a vector of path
// strings) and 1856 (declaration of `uniqueCapacities` — evidently a map
// from byte count to a vector of path strings, see comment at 1855) are
// missing from this extract — confirm against the repository.
1803 bool
1805 {
1806 #if BOOST_OS_LINUX
1807  // Each historical shard path must correspond
1808  // to a directory on a distinct device or file system.
1809  // Currently, this constraint is enforced only on Linux.
1812 
// Group paths by filesystem id so duplicates can be detected.
1813  for (auto const& path : historicalPaths_)
1814  {
1815  struct statvfs buffer;
1816  if (statvfs(path.c_str(), &buffer))
1817  {
1818  JLOG(j_.error())
1819  << "failed to acquire stats for 'historical_shard_path': "
1820  << path;
1821  return false;
1822  }
1823 
1824  filesystemIDs[buffer.f_fsid].push_back(path.string());
1825  }
1826 
1827  bool ret = true;
1828  for (auto const& entry : filesystemIDs)
1829  {
1830  // Check to see if any of the paths are stored on the same file system
1831  if (entry.second.size() > 1)
1832  {
1833  // Two or more historical storage paths
1834  // correspond to the same file system.
1835  JLOG(j_.error())
1836  << "The following paths correspond to the same filesystem: "
1837  << boost::algorithm::join(entry.second, ", ")
1838  << ". Each configured historical storage path should"
1839  " be on a unique device or filesystem.";
1840 
1841  ret = false;
1842  }
1843  }
1844 
1845  return ret;
1846 
1847 #else
1848  // The requirement that each historical storage path
1849  // corresponds to a distinct device or file system is
1850  // enforced only on Linux, so on other platforms
1851  // keep track of the available capacities for each
1852  // path. Issue a warning if we suspect any of the paths
1853  // may violate this requirement.
1854 
1855  // Map byte counts to each path that shares that byte count.
1857  uniqueCapacities(historicalPaths_.size());
1858 
1859  for (auto const& path : historicalPaths_)
1860  uniqueCapacities[boost::filesystem::space(path).available].push_back(
1861  path.string());
1862 
1863  for (auto const& entry : uniqueCapacities)
1864  {
1865  // Check to see if any paths have the same amount of available bytes.
1866  if (entry.second.size() > 1)
1867  {
1868  // Two or more historical storage paths may
1869  // correspond to the same device or file system.
1870  JLOG(j_.warn())
1871  << "Each of the following paths have " << entry.first
1872  << " bytes free, and may be located on the same device"
1873  " or file system: "
1874  << boost::algorithm::join(entry.second, ", ")
1875  << ". Each configured historical storage path should"
1876  " be on a unique device or file system.";
1877  }
1878  }
1879 #endif
1880 
// Non-Linux platforms warn only; treat the configuration as acceptable.
1881  return true;
1882 }
1883 
1884 //------------------------------------------------------------------------------
1885 
// Factory: construct the shard store if the [shard_db] config section is
// present; returns nullptr when the node is not configured for shards.
// NOTE(review): lines 1886-1887 (return type and name; per the
// cross-reference: std::unique_ptr<DatabaseShard> make_ShardStore(...))
// and 1896 (the Section initializer — evidently reading
// ConfigSection::shardDatabase() from app.config()) are missing from this
// extract — confirm against the repository.
1888  Application& app,
1889  Stoppable& parent,
1890  Scheduler& scheduler,
1891  int readThreads,
1892  beast::Journal j)
1893 {
1894  // The shard store is optional. Future changes will require it.
1895  Section const& section{
1897  if (section.empty())
1898  return nullptr;
1899 
1900  return std::make_unique<DatabaseShardImp>(
1901  app, parent, "ShardStore", scheduler, readThreads, j);
1902 }
1903 
1904 } // namespace NodeStore
1905 } // namespace ripple
ripple::NodeStore::make_ShardStore
std::unique_ptr< DatabaseShard > make_ShardStore(Application &app, Stoppable &parent, Scheduler &scheduler, int readThreads, beast::Journal j)
Definition: DatabaseShardImp.cpp:1887
beast::Journal::fatal
Stream fatal() const
Definition: Journal.h:339
ripple::SizedItem::openFinalLimit
@ openFinalLimit
ripple::Section
Holds a collection of configuration values.
Definition: BasicConfig.h:43
ripple::loadLedgerHelper
std::tuple< std::shared_ptr< Ledger >, std::uint32_t, uint256 > loadLedgerHelper(std::string const &sqlSuffix, Application &app, bool acquire)
Definition: Ledger.cpp:1137
ripple::Application
Definition: Application.h:97
ripple::NodeStore::DatabaseShardImp::earliestShardIndex_
std::uint32_t earliestShardIndex_
Definition: DatabaseShardImp.h:214
ripple::NodeStore::DatabaseShardImp::ledgersPerShard_
std::uint32_t ledgersPerShard_
Definition: DatabaseShardImp.h:211
ripple::hotUNKNOWN
@ hotUNKNOWN
Definition: NodeObject.h:33
ripple::NodeStore::DatabaseShardImp::lastLedgerSeq
std::uint32_t lastLedgerSeq(std::uint32_t shardIndex) const override
Calculates the last ledger sequence for a given shard index.
Definition: DatabaseShardImp.h:108
ripple::NodeStore::DatabaseShardImp::earliestShardIndex
std::uint32_t earliestShardIndex() const override
Definition: DatabaseShardImp.h:86
ripple::makeSlice
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:240
ripple::NodeStore::DatabaseShardImp::mutex_
std::mutex mutex_
Definition: DatabaseShardImp.h:169
ripple::NodeStore::DatabaseShardImp::prepareLedger
boost::optional< std::uint32_t > prepareLedger(std::uint32_t validLedgerSeq) override
Prepare to store a new ledger in the shard being acquired.
Definition: DatabaseShardImp.cpp:228
ripple::NodeStore::DatabaseShardImp::app_
Application & app_
Definition: DatabaseShardImp.h:167
ripple::NodeStore::DatabaseShardImp::storeLedger
bool storeLedger(std::shared_ptr< Ledger const > const &srcLedger) override
Store a ledger from a different database.
Definition: DatabaseShardImp.cpp:1016
ripple::NodeStore::Database
Persistency layer for NodeObject.
Definition: Database.h:52
ripple::NodeStore::Shard::acquire
static constexpr State acquire
Definition: Shard.h:60
std::string
STL class.
std::shared_ptr< Ledger >
ripple::loadByIndex
std::shared_ptr< Ledger > loadByIndex(std::uint32_t ledgerIndex, Application &app, bool acquire)
Definition: Ledger.cpp:1229
ripple::SizedItem
SizedItem
Definition: Config.h:48
std::exception
STL class.
std::stoul
T stoul(T... args)
ripple::Stoppable::stopped
void stopped()
Called by derived classes to indicate that the stoppable has stopped.
Definition: Stoppable.cpp:72
ripple::NodeStore::DatabaseShardImp::PathDesignation
PathDesignation
Definition: DatabaseShardImp.h:162
beast::Journal::trace
Stream trace() const
Severity stream access functions.
Definition: Journal.h:309
ripple::Serializer::modData
Blob & modData()
Definition: Serializer.h:176
std::vector::reserve
T reserve(T... args)
ripple::NodeStore::DatabaseShardImp::removePreShard
void removePreShard(std::uint32_t shardIndex) override
Remove a previously prepared shard index for import.
Definition: DatabaseShardImp.cpp:405
ripple::LedgerMaster::getValidLedgerIndex
LedgerIndex getValidLedgerIndex()
Definition: LedgerMaster.cpp:212
ripple::NodeStore::Database::fdRequired_
int fdRequired_
Definition: Database.h:236
ripple::NodeStore::DatabaseShardImp::fileSz_
std::uint64_t fileSz_
Definition: DatabaseShardImp.h:206
ripple::InboundLedger::Reason::GENERIC
@ GENERIC
std::vector
STL class.
std::set::find
T find(T... args)
ripple::ConfigSection::shardDatabase
static std::string shardDatabase()
Definition: ConfigSections.h:38
std::vector::size
T size(T... args)
ripple::NodeObjectType
NodeObjectType
The types of node objects.
Definition: NodeObject.h:32
ripple::CashFilter::none
@ none
ripple::NodeObject::createObject
static std::shared_ptr< NodeObject > createObject(NodeObjectType type, Blob &&data, uint256 const &hash)
Create an object from fields.
Definition: NodeObject.cpp:37
ripple::NodeStore::DatabaseShardImp::taskQueue_
std::unique_ptr< TaskQueue > taskQueue_
Definition: DatabaseShardImp.h:176
ripple::NodeStore::DatabaseShardImp::setStored
void setStored(std::shared_ptr< Ledger const > const &ledger) override
Notifies the database that the given ledger has been fully acquired and stored.
Definition: DatabaseShardImp.cpp:616
ripple::NodeStore::DatabaseShardImp::updateStatus
void updateStatus(std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:1427
ripple::NodeStore::DatabaseShardImp::secondLatestShardIndex_
boost::optional< std::uint32_t > secondLatestShardIndex_
Definition: DatabaseShardImp.h:233
std::set::emplace
T emplace(T... args)
beast::Journal::warn
Stream warn() const
Definition: Journal.h:327
std::lock_guard
STL class.
ripple::kilobytes
constexpr auto kilobytes(T value) noexcept
Definition: ByteUtilities.h:27
ripple::NetworkOPs::getOperatingMode
virtual OperatingMode getOperatingMode() const =0
ripple::NodeStore::FetchReport
Contains information about a fetch operation.
Definition: ripple/nodestore/Scheduler.h:32
ripple::LedgerMaster::walkHashBySeq
boost::optional< LedgerHash > walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason)
Walk to a ledger's hash using the skip list.
Definition: LedgerMaster.cpp:1604
ripple::NodeStore::DatabaseShardImp::finalizeShard
void finalizeShard(std::shared_ptr< Shard > &shard, bool writeSQLite, boost::optional< uint256 > const &expectedHash)
Definition: DatabaseShardImp.cpp:1278
boost
Definition: IPAddress.h:117
std::all_of
T all_of(T... args)
ripple::to_string
std::string to_string(ListDisposition disposition)
Definition: ValidatorList.cpp:45
ripple::NodeStore::Shard::finalKey
static const uint256 finalKey
Definition: Shard.h:205
ripple::NodeStore::Shard::complete
static constexpr State complete
Definition: Shard.h:61
ripple::NodeStore::DatabaseShardImp::openFinalLimit_
const std::uint32_t openFinalLimit_
Definition: DatabaseShardImp.h:220
std::sort
T sort(T... args)
std::shared_ptr::reset
T reset(T... args)
ripple::SHAMapHash
Definition: SHAMapTreeNode.h:47
ripple::Application::getOPs
virtual NetworkOPs & getOPs()=0
ripple::NodeStore::DatabaseShardImp::sweep
void sweep() override
Remove expired entries from the positive and negative caches.
Definition: DatabaseShardImp.cpp:1051
ripple::Section::values
std::vector< std::string > const & values() const
Returns all the values in the section.
Definition: BasicConfig.h:76
std::string::clear
T clear(T... args)
ripple::send_always
Sends a message to all peers.
Definition: predicates.h:31
ripple::Stoppable::setParent
void setParent(Stoppable &parent)
Set the parent of this Stoppable.
Definition: Stoppable.cpp:43
ripple::NodeStore::DatabaseShardImp::PathDesignation::historical
@ historical
ripple::get_if_exists
bool get_if_exists(Section const &section, std::string const &name, T &v)
Definition: BasicConfig.h:347
ripple::deserializePrefixedHeader
LedgerInfo deserializePrefixedHeader(Slice data)
Deserialize a ledger header (prefixed with 4 bytes) from a byte array.
Definition: InboundLedger.cpp:298
ripple::NodeStore::Shard::version
static constexpr std::uint32_t version
Definition: Shard.h:200
std::tie
T tie(T... args)
std::vector::push_back
T push_back(T... args)
ripple::NodeStore::DatabaseShardImp::firstLedgerSeq
std::uint32_t firstLedgerSeq(std::uint32_t shardIndex) const override
Calculates the first ledger sequence for a given shard index.
Definition: DatabaseShardImp.h:99
ripple::NodeStore::DatabaseShardImp::fetchNodeObject
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq, FetchReport &fetchReport) override
Definition: DatabaseShardImp.cpp:1193
ripple::NodeStore::DatabaseShardImp::avgShardFileSz_
std::uint64_t avgShardFileSz_
Definition: DatabaseShardImp.h:217
ripple::base_uint< 256 >
ripple::NodeStore::DatabaseShardImp::status_
std::string status_
Definition: DatabaseShardImp.h:194
std::sample
T sample(T... args)
ripple::NodeStore::DatabaseShardImp::getPreShards
std::string getPreShards() override
Get shard indexes being imported.
Definition: DatabaseShardImp.cpp:414
ripple::NodeStore::DatabaseShardImp::getWriteLoad
std::int32_t getWriteLoad() const override
Retrieve the estimated number of pending write operations.
Definition: DatabaseShardImp.cpp:965
ripple::NodeStore::TaskQueue
Definition: TaskQueue.h:32
ripple::OperatingMode::DISCONNECTED
@ DISCONNECTED
not ready to process requests
ripple::Stoppable
Provides an interface for starting and stopping.
Definition: Stoppable.h:201
ripple::NodeStore::DatabaseShardImp::chooseHistoricalPath
boost::filesystem::path chooseHistoricalPath(std::lock_guard< std::mutex > const &) const
Definition: DatabaseShardImp.cpp:1771
ripple::rand_int
std::enable_if_t< std::is_integral< Integral >::value &&detail::is_engine< Engine >::value, Integral > rand_int(Engine &engine, Integral min, Integral max)
Return a uniformly distributed random integer.
Definition: ripple/basics/random.h:115
ripple::NodeStore::DatabaseShardImp::sufficientStorage
bool sufficientStorage(std::uint32_t numShards, PathDesignation pathDesignation, std::lock_guard< std::mutex > const &) const
Definition: DatabaseShardImp.cpp:1442
ripple::NodeStore::DatabaseShardImp::init_
bool init_
Definition: DatabaseShardImp.h:170
ripple::NodeStore::DatabaseShardImp::getCompleteShards
std::string getCompleteShards() override
Query which complete shards are stored.
Definition: DatabaseShardImp.cpp:678
ripple::NodeStore::DatabaseShardImp::seqToShardIndex
std::uint32_t seqToShardIndex(std::uint32_t ledgerSeq) const override
Calculates the shard index for a given ledger sequence.
Definition: DatabaseShardImp.h:92
ripple::Application::getLedgerMaster
virtual LedgerMaster & getLedgerMaster()=0
ripple::PublicKey
A public key.
Definition: PublicKey.h:59
ripple::Config
Definition: Config.h:67
std::ofstream
STL class.
ripple::Application::config
virtual Config & config()=0
ripple::NodeStore::DatabaseShardImp::dir_
boost::filesystem::path dir_
Definition: DatabaseShardImp.h:188
ripple::Config::standalone
bool standalone() const
Definition: Config.h:255
ripple::NodeStore::DatabaseShardImp::removeFailedShard
void removeFailedShard(std::shared_ptr< Shard > &shard)
Definition: DatabaseShardImp.cpp:1530
ripple::Application::nodeIdentity
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
ripple::NodeStore::DatabaseShard
A collection of historical shards.
Definition: DatabaseShard.h:37
std::to_string
T to_string(T... args)
ripple::NodeStore::DatabaseShardImp::importShard
bool importShard(std::uint32_t shardIndex, boost::filesystem::path const &srcDir) override
Import a shard into the shard database.
Definition: DatabaseShardImp.cpp:432
ripple::default_prng
beast::xor_shift_engine & default_prng()
Return the default random engine.
Definition: ripple/basics/random.h:65
ripple::NodeStore::DatabaseShardImp::store
void store(NodeObjectType type, Blob &&data, uint256 const &hash, std::uint32_t ledgerSeq) override
Store the object.
Definition: DatabaseShardImp.cpp:982
ripple::NodeStore::DatabaseShardImp::PathDesignation::none
@ none
beast::Journal::error
Stream error() const
Definition: Journal.h:333
ripple::NodeStore::DatabaseShardImp::checkHistoricalPaths
bool checkHistoricalPaths() const
Definition: DatabaseShardImp.cpp:1804
std::set::erase
T erase(T... args)
ripple::NodeStore::DatabaseShardImp::initConfig
bool initConfig(std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:1105
ripple::ConfigSection
Definition: ConfigSections.h:28
ripple::NodeStore::DatabaseShardImp::latestShardIndex_
boost::optional< std::uint32_t > latestShardIndex_
Definition: DatabaseShardImp.h:232
beast::Journal
A generic endpoint for log messages.
Definition: Journal.h:58
ripple::NodeStore::seqToShardIndex
constexpr std::uint32_t seqToShardIndex(std::uint32_t ledgerSeq, std::uint32_t ledgersPerShard=DatabaseShard::ledgersPerShardDefault)
Definition: DatabaseShard.h:183
std::uint32_t
ripple::NodeStore::Database::earliestLedgerSeq
std::uint32_t earliestLedgerSeq() const
Definition: Database.h:228
ripple::NodeStore::DatabaseShardImp::setFileStats
void setFileStats()
Definition: DatabaseShardImp.cpp:1366
ripple::NodeStore::DatabaseShardImp::acquireIndex_
std::uint32_t acquireIndex_
Definition: DatabaseShardImp.h:185
ripple::Overlay::foreach
void foreach(Function f) const
Visit every active peer.
Definition: Overlay.h:178
ripple::NodeStore::Scheduler
Scheduling for asynchronous backend activity.
Definition: ripple/nodestore/Scheduler.h:60
ripple::NodeStore::Database::fetchNodeObject
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq=0, FetchType fetchType=FetchType::synchronous)
Fetch a node object.
Definition: Database.cpp:142
std::transform
T transform(T... args)
ripple::NodeStore::Database::storeStats
void storeStats(std::uint64_t count, std::uint64_t sz)
Definition: Database.h:245
ripple::NodeStore::DatabaseShardImp::preparedIndexes_
std::set< std::uint32_t > preparedIndexes_
Definition: DatabaseShardImp.h:182
ripple::NodeStore::DatabaseShardImp::init
bool init() override
Initialize the database.
Definition: DatabaseShardImp.cpp:67
ripple::NodeStore::Shard::final
static constexpr State final
Definition: Shard.h:63
std::weak_ptr
STL class.
ripple::Serializer
Definition: Serializer.h:39
ripple::NodeStore::Database::stopReadThreads
void stopReadThreads()
Definition: Database.cpp:75
ripple::NodeStore::DatabaseShardImp::historicalPaths_
std::vector< boost::filesystem::path > historicalPaths_
Definition: DatabaseShardImp.h:203
ripple::NodeStore::DatabaseShardImp::parent_
Stoppable & parent_
Definition: DatabaseShardImp.h:168
ripple::NodeStore::DatabaseShardImp::findAcquireIndex
boost::optional< std::uint32_t > findAcquireIndex(std::uint32_t validLedgerSeq, std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:1212
ripple::NodeStore::DatabaseShardImp::importMarker_
static constexpr auto importMarker_
Definition: DatabaseShardImp.h:223
std::vector::emplace_back
T emplace_back(T... args)
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::Serializer::addBitString
int addBitString(base_uint< Bits, Tag > const &v)
Definition: Serializer.h:97
ripple::Application::getNodeStore
virtual NodeStore::Database & getNodeStore()=0
ripple::NodeStore::DatabaseShardImp::import
void import(Database &source) override
Import the application local node store.
Definition: DatabaseShardImp.cpp:729
ripple::NodeStore::DatabaseShardImp::maxHistoricalShards_
std::uint32_t maxHistoricalShards_
Definition: DatabaseShardImp.h:200
ripple::Application::getShardFamily
virtual Family * getShardFamily()=0
ripple::NodeStore::Database::j_
const beast::Journal j_
Definition: Database.h:234
ripple::NodeStore::DatabaseShardImp::fetchLedger
std::shared_ptr< Ledger > fetchLedger(uint256 const &hash, std::uint32_t ledgerSeq) override
Fetch a ledger from the shard store.
Definition: DatabaseShardImp.cpp:538
std::vector::begin
T begin(T... args)
std
STL namespace.
ripple::NodeStore::DatabaseShardImp::numHistoricalShards
std::uint32_t numHistoricalShards(std::lock_guard< std::mutex > const &lock) const
Definition: DatabaseShardImp.cpp:1576
ripple::LedgerMaster::getCurrentLedgerIndex
LedgerIndex getCurrentLedgerIndex()
Definition: LedgerMaster.cpp:206
ripple::NodeStore::DatabaseShardImp::relocateOutdatedShards
void relocateOutdatedShards(std::lock_guard< std::mutex > const &lock)
Definition: DatabaseShardImp.cpp:1587
ripple::NodeStore::DatabaseShardImp::onChildrenStopped
void onChildrenStopped() override
Override called when all children have stopped.
Definition: DatabaseShardImp.cpp:700
ripple::Application::overlay
virtual Overlay & overlay()=0
ripple::NodeStore::DatabaseShardImp::shardBoundaryIndex
std::uint32_t shardBoundaryIndex() const
Definition: DatabaseShardImp.cpp:1560
std::count_if
T count_if(T... args)
std::vector::empty
T empty(T... args)
ripple::NodeStore::DatabaseShardImp::prepareShards
bool prepareShards(std::vector< std::uint32_t > const &shardIndexes) override
Prepare one or more shard indexes to be imported into the database.
Definition: DatabaseShardImp.cpp:296
std::unique
T unique(T... args)
beast::Journal::debug
Stream debug() const
Definition: Journal.h:315
ripple::getHashesByIndex
bool getHashesByIndex(std::uint32_t ledgerIndex, uint256 &ledgerHash, uint256 &parentHash, Application &app)
Definition: Ledger.cpp:1291
ripple::NodeStore::DatabaseShardImp::setStoredInShard
bool setStoredInShard(std::shared_ptr< Shard > &shard, std::shared_ptr< Ledger const > const &ledger)
Definition: DatabaseShardImp.cpp:1497
ripple::NodeStore::DatabaseShardImp::canAdd_
bool canAdd_
Definition: DatabaseShardImp.h:191
ripple::Serializer::add32
int add32(std::uint32_t i)
Definition: Serializer.cpp:38
std::vector::end
T end(T... args)
ripple::NodeStore::Database::scheduler_
Scheduler & scheduler_
Definition: Database.h:235
ripple::RangeSet
boost::icl::interval_set< T, std::less, ClosedInterval< T > > RangeSet
A set of closed intervals over the domain T.
Definition: RangeSet.h:69
std::max
T max(T... args)
ripple::NodeStore::DatabaseShardImp::shards_
std::unordered_map< std::uint32_t, std::shared_ptr< Shard > > shards_
Definition: DatabaseShardImp.h:179
ripple::NodeStore::DatabaseShardImp::DatabaseShardImp
DatabaseShardImp()=delete
std::unique_ptr
STL class.
std::unordered_map
STL class.
ripple::PublisherStatus::available
@ available
ripple::NodeStore::DatabaseShardImp::prepareForNewShard
boost::optional< PathDesignation > prepareForNewShard(std::uint32_t shardIndex, std::uint32_t numHistoricalShards, std::lock_guard< std::mutex > const &lock)
Definition: DatabaseShardImp.cpp:1740
ripple::ConfigSection::nodeDatabase
static std::string nodeDatabase()
Definition: ConfigSections.h:33
std::exception::what
T what(T... args)
ripple::NodeStore::DatabaseShardImp::onStop
void onStop() override
Override called when the stop notification is issued.
Definition: DatabaseShardImp.cpp:687
ripple::NodeStore::DatabaseShardImp::ctx_
std::unique_ptr< nudb::context > ctx_
Definition: DatabaseShardImp.h:173
ripple::BasicConfig::section
Section & section(std::string const &name)
Returns the section with the given name.
Definition: BasicConfig.cpp:138
ripple::Stoppable::isStopping
bool isStopping() const
Returns true if the stoppable should stop.
Definition: Stoppable.cpp:54
ripple::NodeStore::DatabaseShardImp::backendName_
std::string backendName_
Definition: DatabaseShardImp.h:197