rippled
DatabaseShardImp.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2017 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/app/ledger/InboundLedgers.h>
21 #include <ripple/app/ledger/LedgerMaster.h>
22 #include <ripple/app/misc/NetworkOPs.h>
23 #include <ripple/basics/ByteUtilities.h>
24 #include <ripple/basics/chrono.h>
25 #include <ripple/basics/random.h>
26 #include <ripple/core/ConfigSections.h>
27 #include <ripple/nodestore/DummyScheduler.h>
28 #include <ripple/nodestore/impl/DatabaseShardImp.h>
29 #include <ripple/overlay/Overlay.h>
30 #include <ripple/overlay/predicates.h>
31 #include <ripple/protocol/HashPrefix.h>
32 
33 #include <boost/algorithm/string/predicate.hpp>
34 
35 #if BOOST_OS_LINUX
36 #include <sys/statvfs.h>
37 #endif
38 
39 namespace ripple {
40 namespace NodeStore {
41 
42 DatabaseShardImp::DatabaseShardImp(
43  Application& app,
44  Stoppable& parent,
45  std::string const& name,
46  Scheduler& scheduler,
47  int readThreads,
48  beast::Journal j)
49  : DatabaseShard(
50  name,
51  parent,
52  scheduler,
53  readThreads,
54  app.config().section(ConfigSection::shardDatabase()),
55  j)
56  , app_(app)
57  , parent_(parent)
58  , taskQueue_(std::make_unique<TaskQueue>(*this))
59  , earliestShardIndex_(seqToShardIndex(earliestLedgerSeq()))
60  , avgShardFileSz_(ledgersPerShard_ * kilobytes(192ull))
61  , openFinalLimit_(
62  app.config().getValueFor(SizedItem::openFinalLimit, boost::none))
63 {
64  if (app.config().reporting())
65  {
66  Throw<std::runtime_error>(
67  "Attempted to create DatabaseShardImp in reporting mode. Reporting "
68  "does not support shards. Remove shards info from config");
69  }
70 }
71 
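// init() walks the main shard path and any historical paths, creates
// missing directories, and opens every shard directory found there.
// Shards are classified by state: finalized shards are indexed (and
// closed once more than openFinalLimit_ are open), complete shards are
// queued for finalization, and at most one shard may be mid-acquisition.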
72 bool
73 DatabaseShardImp::init()
74 {
75  {
76  std::lock_guard lock(mutex_);
77  if (init_)
78  {
79  JLOG(j_.error()) << "already initialized";
80  return false;
81  }
82 
83  if (!initConfig(lock))
84  {
85  JLOG(j_.error()) << "invalid configuration file settings";
86  return false;
87  }
88 
89  try
90  {
91  using namespace boost::filesystem;
92 
93  // Consolidate the main storage path and all historical paths
94  std::vector<path> paths{dir_};
95  paths.insert(
96  paths.end(), historicalPaths_.begin(), historicalPaths_.end());
97 
98  for (auto const& path : paths)
99  {
100  if (exists(path))
101  {
102  if (!is_directory(path))
103  {
104  JLOG(j_.error()) << path << " must be a directory";
105  return false;
106  }
107  }
108  else if (!create_directories(path))
109  {
110  JLOG(j_.error())
111  << "failed to create path: " + path.string();
112  return false;
113  }
114  }
115 
116  if (!historicalPaths_.empty())
117  {
118  // Check historical paths for duplicated file systems
119  if (!checkHistoricalPaths())
120  return false;
121  }
122 
123  ctx_ = std::make_unique<nudb::context>();
124  ctx_->start();
125 
126  // Find shards
127  std::uint32_t openFinals{0};
128  for (auto const& path : paths)
129  {
130  for (auto const& it : directory_iterator(path))
131  {
132  // Ignore files
133  if (!is_directory(it))
134  continue;
135 
136  // Ignore nonnumerical directory names
137  auto const shardDir{it.path()};
138  auto dirName{shardDir.stem().string()};
139  if (!std::all_of(
140  dirName.begin(), dirName.end(), [](auto c) {
141  return ::isdigit(static_cast<unsigned char>(c));
142  }))
143  {
144  continue;
145  }
146 
147  // Ignore values below the earliest shard index
148  auto const shardIndex{std::stoul(dirName)};
149  if (shardIndex < earliestShardIndex())
150  {
151  JLOG(j_.debug())
152  << "shard " << shardIndex
153  << " ignored, comes before earliest shard index "
154  << earliestShardIndex();
155  continue;
156  }
157 
158  // Check if a previous import failed
159  if (is_regular_file(shardDir / importMarker_))
160  {
161  JLOG(j_.warn())
162  << "shard " << shardIndex
163  << " previously failed import, removing";
164  remove_all(shardDir);
165  continue;
166  }
167 
168  auto shard{std::make_shared<Shard>(
169  app_, *this, shardIndex, shardDir.parent_path(), j_)};
170  if (!shard->init(scheduler_, *ctx_))
171  {
172  // Remove corrupted or legacy shard
173  shard->removeOnDestroy();
174  JLOG(j_.warn())
175  << "shard " << shardIndex << " removed, "
176  << (shard->isLegacy() ? "legacy" : "corrupted")
177  << " shard";
178  continue;
179  }
180 
181  switch (shard->getState())
182  {
183  case Shard::final:
184  if (++openFinals > openFinalLimit_)
185  shard->tryClose();
186  shards_.emplace(shardIndex, std::move(shard));
187  break;
188 
189  case Shard::complete:
190  finalizeShard(
191  shards_.emplace(shardIndex, std::move(shard))
192  .first->second,
193  true,
194  boost::none);
195  break;
196 
197  case Shard::acquire:
198  if (acquireIndex_ != 0)
199  {
200  JLOG(j_.error())
201  << "more than one shard being acquired";
202  return false;
203  }
204 
205  shards_.emplace(shardIndex, std::move(shard));
206  acquireIndex_ = shardIndex;
207  break;
208 
209  default:
210  JLOG(j_.error())
211  << "shard " << shardIndex << " invalid state";
212  return false;
213  }
214  }
215  }
216  }
217  catch (std::exception const& e)
218  {
219  JLOG(j_.fatal()) << "Exception caught in function " << __func__
220  << ". Error: " << e.what();
221  return false;
222  }
223 
224  updateStatus(lock);
225 
226  init_ = true;
227  }
228 
229  setFileStats();
230  return true;
231 }
232 
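// Returns the sequence of the first ledger to fetch for the shard being
// acquired, selecting a new shard index at random via findAcquireIndex()
// when no acquisition is in progress. Returns boost::none once the store
// can no longer add shards.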
233 boost::optional<std::uint32_t>
234 DatabaseShardImp::prepareLedger(std::uint32_t validLedgerSeq)
235 {
236  boost::optional<std::uint32_t> shardIndex;
237 
238  {
239  std::lock_guard lock(mutex_);
240  assert(init_);
241 
242  if (acquireIndex_ != 0)
243  {
244  if (auto const it{shards_.find(acquireIndex_)}; it != shards_.end())
245  return it->second->prepare();
246 
247  // Should never get here
248  assert(false);
249  return boost::none;
250  }
251 
252  if (!canAdd_)
253  return boost::none;
254 
255  shardIndex = findAcquireIndex(validLedgerSeq, lock);
256  }
257 
258  if (!shardIndex)
259  {
260  JLOG(j_.debug()) << "no new shards to add";
261  {
262  std::lock_guard lock(mutex_);
263  canAdd_ = false;
264  }
265  return boost::none;
266  }
267 
268  auto const pathDesignation = [this, shardIndex = *shardIndex]() {
269  std::lock_guard lock(mutex_);
270  return prepareForNewShard(shardIndex, numHistoricalShards(lock), lock);
271  }();
272 
273  if (!pathDesignation)
274  return boost::none;
275 
276  auto const needsHistoricalPath =
277  *pathDesignation == PathDesignation::historical;
278 
279  auto shard = [this, shardIndex, needsHistoricalPath] {
280  std::lock_guard lock(mutex_);
281  return std::make_unique<Shard>(
282  app_,
283  *this,
284  *shardIndex,
285  (needsHistoricalPath ? chooseHistoricalPath(lock) : ""),
286  j_);
287  }();
288 
289  if (!shard->init(scheduler_, *ctx_))
290  return boost::none;
291 
292  auto const ledgerSeq{shard->prepare()};
293  {
294  std::lock_guard lock(mutex_);
295  shards_.emplace(*shardIndex, std::move(shard));
296  acquireIndex_ = *shardIndex;
297  }
298  return ledgerSeq;
299 }
300 
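// Validates a batch of shard indexes before they may be imported: each
// index must not precede the earliest shard index, must not match a
// current or validated ledger's shard, and must not already be stored
// or queued. Storage headroom is checked separately for historical and
// recent shards before the indexes are recorded in preparedIndexes_.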
301 bool
302 DatabaseShardImp::prepareShards(std::vector<std::uint32_t> const& shardIndexes)
303 {
304  auto fail = [j = j_, &shardIndexes](
305  std::string const& msg,
306  boost::optional<std::uint32_t> shardIndex = boost::none) {
307  auto multipleIndexPrequel = [&shardIndexes] {
308  std::vector<std::string> indexesAsString(shardIndexes.size());
309  std::transform(
310  shardIndexes.begin(),
311  shardIndexes.end(),
312  indexesAsString.begin(),
313  [](uint32_t const index) { return std::to_string(index); });
314 
315  return std::string("shard") +
316  (shardIndexes.size() > 1 ? "s " : " ") +
317  boost::algorithm::join(indexesAsString, ", ");
318  };
319 
320  std::string const prequel = shardIndex
321  ? "shard " + std::to_string(*shardIndex)
322  : multipleIndexPrequel();
323 
324  JLOG(j.error()) << prequel << " " << msg;
325  return false;
326  };
327 
328  std::lock_guard lock(mutex_);
329  assert(init_);
330 
331  if (!canAdd_)
332  return fail("cannot be stored at this time");
333 
334  auto historicalShardsToPrepare = 0;
335 
336  for (auto const shardIndex : shardIndexes)
337  {
338  if (shardIndex < earliestShardIndex())
339  {
340  return fail(
341  "comes before earliest shard index " +
342  std::to_string(earliestShardIndex()),
343  shardIndex);
344  }
345 
346  // If we are synced to the network, check if the shard index is
347  // greater or equal to the current or validated shard index.
348  auto seqCheck = [&](std::uint32_t ledgerSeq) {
349  if (ledgerSeq >= earliestLedgerSeq() &&
350  shardIndex >= seqToShardIndex(ledgerSeq))
351  {
352  return fail("invalid index", shardIndex);
353  }
354  return true;
355  };
356  if (!seqCheck(app_.getLedgerMaster().getValidLedgerIndex() + 1) ||
357  !seqCheck(app_.getLedgerMaster().getCurrentLedgerIndex()))
358  {
359  return fail("invalid index", shardIndex);
360  }
361 
362  if (shards_.find(shardIndex) != shards_.end())
363  return fail("is already stored", shardIndex);
364 
365  if (preparedIndexes_.find(shardIndex) != preparedIndexes_.end())
366  return fail("is already queued for import", shardIndex);
367 
368  // Any shard earlier than the two most recent shards
369  // is a historical shard
370  if (shardIndex < shardBoundaryIndex())
371  ++historicalShardsToPrepare;
372  }
373 
374  auto const numHistShards = numHistoricalShards(lock);
375 
376  // Check shard count and available storage space
377  if (numHistShards + historicalShardsToPrepare > maxHistoricalShards_)
378  return fail("maximum number of historical shards reached");
379 
380  if (historicalShardsToPrepare)
381  {
382  // Check available storage space for historical shards
383  if (!sufficientStorage(
384  historicalShardsToPrepare, PathDesignation::historical, lock))
385  return fail("insufficient storage space available");
386  }
387 
388  if (auto const recentShardsToPrepare =
389  shardIndexes.size() - historicalShardsToPrepare;
390  recentShardsToPrepare)
391  {
392  // Check available storage space for recent shards
393  if (!sufficientStorage(
394  recentShardsToPrepare, PathDesignation::none, lock))
395  return fail("insufficient storage space available");
396  }
397 
398  for (auto const shardIndex : shardIndexes)
399  {
400  auto const prepareSuccessful =
401  preparedIndexes_.emplace(shardIndex).second;
402 
403  (void)prepareSuccessful;
404  assert(prepareSuccessful);
405  }
406 
407  return true;
408 }
409 
410 void
411 DatabaseShardImp::removePreShard(std::uint32_t shardIndex)
412 {
413  std::lock_guard lock(mutex_);
414  assert(init_);
415 
416  preparedIndexes_.erase(shardIndex);
417 }
418 
419 std::string
420 DatabaseShardImp::getPreShards()
421 {
422  RangeSet<std::uint32_t> rs;
423  {
424  std::lock_guard lock(mutex_);
425  assert(init_);
426 
427  for (auto const& shardIndex : preparedIndexes_)
428  rs.insert(shardIndex);
429  }
430 
431  if (rs.empty())
432  return {};
433 
434  return to_string(rs);
435 }
436 
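// Imports a shard built elsewhere by renaming srcDir into the shard
// store, re-opening it as a Shard, and scheduling finalization against
// the hash obtained from the ledger master's skip list. On any failure
// the directory is renamed back and the prepared index is released.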
437 bool
438 DatabaseShardImp::importShard(
439  std::uint32_t shardIndex,
440  boost::filesystem::path const& srcDir)
441 {
442  auto fail = [&](std::string const& msg,
443  std::lock_guard<std::mutex> const& lock) {
444  JLOG(j_.error()) << "shard " << shardIndex << " " << msg;
445 
446  // Remove the failed import shard index so it can be retried
447  preparedIndexes_.erase(shardIndex);
448  return false;
449  };
450 
451  using namespace boost::filesystem;
452  try
453  {
454  if (!is_directory(srcDir) || is_empty(srcDir))
455  {
456  return fail(
457  "invalid source directory " + srcDir.string(),
458  std::lock_guard(mutex_));
459  }
460  }
461  catch (std::exception const& e)
462  {
463  return fail(
464  std::string(". Exception caught in function ") + __func__ +
465  ". Error: " + e.what(),
466  std::lock_guard(mutex_));
467  }
468 
469  auto const expectedHash{app_.getLedgerMaster().walkHashBySeq(
470  lastLedgerSeq(shardIndex), InboundLedger::Reason::GENERIC)};
471  if (!expectedHash)
472  return fail("expected hash not found", std::lock_guard(mutex_));
473 
474  path dstDir;
475  {
476  std::lock_guard lock(mutex_);
477  if (shards_.find(shardIndex) != shards_.end())
478  return fail("already exists", lock);
479 
480  // Check shard was prepared for import
481  if (preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
482  return fail("was not prepared for import", lock);
483 
484  auto const pathDesignation{
485  prepareForNewShard(shardIndex, numHistoricalShards(lock), lock)};
486  if (!pathDesignation)
487  return fail("failed to import", lock);
488 
489  if (*pathDesignation == PathDesignation::historical)
490  dstDir = chooseHistoricalPath(lock);
491  else
492  dstDir = dir_;
493  }
494  dstDir /= std::to_string(shardIndex);
495 
496  auto renameDir = [&](path const& src, path const& dst) {
497  try
498  {
499  rename(src, dst);
500  }
501  catch (std::exception const& e)
502  {
503  return fail(
504  std::string(". Exception caught in function ") + __func__ +
505  ". Error: " + e.what(),
506  std::lock_guard(mutex_));
507  }
508  return true;
509  };
510 
511  // Rename source directory to the shard database directory
512  if (!renameDir(srcDir, dstDir))
513  return false;
514 
515  // Create the new shard
516  auto shard{std::make_unique<Shard>(
517  app_, *this, shardIndex, dstDir.parent_path(), j_)};
518 
519  if (!shard->init(scheduler_, *ctx_) || shard->getState() != Shard::complete)
520  {
521  shard.reset();
522  renameDir(dstDir, srcDir);
523  return fail("failed to import", std::lock_guard(mutex_));
524  }
525 
526  auto const [it, inserted] = [&]() {
527  std::lock_guard lock(mutex_);
528  preparedIndexes_.erase(shardIndex);
529  return shards_.emplace(shardIndex, std::move(shard));
530  }();
531 
532  if (!inserted)
533  {
534  shard.reset();
535  renameDir(dstDir, srcDir);
536  return fail("failed to import", std::lock_guard(mutex_));
537  }
538 
539  finalizeShard(it->second, true, expectedHash);
540  return true;
541 }
542 
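// Loads a complete ledger from a shard: the header is fetched from the
// shard's backend and deserialized, then the state and transaction map
// root nodes are verified to be present before the ledger is returned.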
543 std::shared_ptr<Ledger>
544 DatabaseShardImp::fetchLedger(uint256 const& hash, std::uint32_t ledgerSeq)
545 {
546  auto const shardIndex{seqToShardIndex(ledgerSeq)};
547  {
548  std::shared_ptr<Shard> shard;
549  {
550  std::lock_guard lock(mutex_);
551  assert(init_);
552 
553  auto const it{shards_.find(shardIndex)};
554  if (it == shards_.end())
555  return nullptr;
556  shard = it->second;
557  }
558 
559  // Ledger must be stored in a final or acquiring shard
560  switch (shard->getState())
561  {
562  case Shard::final:
563  break;
564  case Shard::acquire:
565  if (shard->containsLedger(ledgerSeq))
566  break;
567  [[fallthrough]];
568  default:
569  return nullptr;
570  }
571  }
572 
573  auto const nodeObject{Database::fetchNodeObject(hash, ledgerSeq)};
574  if (!nodeObject)
575  return nullptr;
576 
577  auto fail = [&](std::string const& msg) -> std::shared_ptr<Ledger> {
578  JLOG(j_.error()) << "shard " << shardIndex << " " << msg;
579  return nullptr;
580  };
581 
582  auto ledger{std::make_shared<Ledger>(
583  deserializePrefixedHeader(makeSlice(nodeObject->getData())),
584  app_.config(),
585  *app_.getShardFamily())};
586 
587  if (ledger->info().seq != ledgerSeq)
588  {
589  return fail(
590  "encountered invalid ledger sequence " + std::to_string(ledgerSeq));
591  }
592  if (ledger->info().hash != hash)
593  {
594  return fail(
595  "encountered invalid ledger hash " + to_string(hash) +
596  " on sequence " + std::to_string(ledgerSeq));
597  }
598 
599  ledger->setFull();
600  if (!ledger->stateMap().fetchRoot(
601  SHAMapHash{ledger->info().accountHash}, nullptr))
602  {
603  return fail(
604  "is missing root STATE node on hash " + to_string(hash) +
605  " on sequence " + std::to_string(ledgerSeq));
606  }
607 
608  if (ledger->info().txHash.isNonZero())
609  {
610  if (!ledger->txMap().fetchRoot(
611  SHAMapHash{ledger->info().txHash}, nullptr))
612  {
613  return fail(
614  "is missing root TXN node on hash " + to_string(hash) +
615  " on sequence " + std::to_string(ledgerSeq));
616  }
617  }
618  return ledger;
619 }
620 
621 void
622 DatabaseShardImp::setStored(std::shared_ptr<Ledger const> const& ledger)
623 {
624  auto const ledgerSeq{ledger->info().seq};
625  if (ledger->info().hash.isZero())
626  {
627  JLOG(j_.error()) << "zero ledger hash for ledger sequence "
628  << ledgerSeq;
629  return;
630  }
631  if (ledger->info().accountHash.isZero())
632  {
633  JLOG(j_.error()) << "zero account hash for ledger sequence "
634  << ledgerSeq;
635  return;
636  }
637  if (ledger->stateMap().getHash().isNonZero() &&
638  !ledger->stateMap().isValid())
639  {
640  JLOG(j_.error()) << "invalid state map for ledger sequence "
641  << ledgerSeq;
642  return;
643  }
644  if (ledger->info().txHash.isNonZero() && !ledger->txMap().isValid())
645  {
646  JLOG(j_.error()) << "invalid transaction map for ledger sequence "
647  << ledgerSeq;
648  return;
649  }
650 
651  auto const shardIndex{seqToShardIndex(ledgerSeq)};
652  std::shared_ptr<Shard> shard;
653  {
654  std::lock_guard lock(mutex_);
655  assert(init_);
656 
657  if (shardIndex != acquireIndex_)
658  {
659  JLOG(j_.trace())
660  << "shard " << shardIndex << " is not being acquired";
661  return;
662  }
663 
664  auto const it{shards_.find(shardIndex)};
665  if (it == shards_.end())
666  {
667  JLOG(j_.error())
668  << "shard " << shardIndex << " is not being acquired";
669  return;
670  }
671  shard = it->second;
672  }
673 
674  if (shard->containsLedger(ledgerSeq))
675  {
676  JLOG(j_.trace()) << "shard " << shardIndex << " ledger already stored";
677  return;
678  }
679 
680  setStoredInShard(shard, ledger);
681 }
682 
683 std::string
684 DatabaseShardImp::getCompleteShards()
685 {
686  std::lock_guard lock(mutex_);
687  assert(init_);
688 
689  return status_;
690 }
691 
692 void
693 DatabaseShardImp::onStop()
694 {
695  // Stop read threads in base before data members are destroyed
696  stopReadThreads();
697 
698  std::lock_guard lock(mutex_);
699 
700  // Notify shards to stop
701  for (auto const& e : shards_)
702  e.second->stop();
703 }
704 
705 void
706 DatabaseShardImp::onChildrenStopped()
707 {
708  std::vector<std::weak_ptr<Shard>> shards;
709  {
710  std::lock_guard lock(mutex_);
711 
712  shards.reserve(shards_.size());
713  for (auto const& e : shards_)
714  shards.push_back(e.second);
715  shards_.clear();
716  }
717 
718  // All shards should be expired at this point
719  for (auto const& e : shards)
720  {
721  if (!e.expired())
722  {
723  std::string shardIndex;
724  if (auto const shard{e.lock()}; shard)
725  shardIndex = std::to_string(shard->index());
726 
727  JLOG(j_.warn()) << " shard " << shardIndex << " unexpired";
728  }
729  }
730 
731  stopped();
732 }
733 
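// Imports complete shards from the application's node store. The range
// of importable shard indexes is derived from the earliest and latest
// ledgers in the SQLite ledger database, trimmed to whole shards; each
// candidate shard's ledgers are spot-checked against the node store,
// copied in, capped with the shard's final key, and then finalized.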
734 void
735 DatabaseShardImp::import(Database& source)
736 {
737  {
738  std::lock_guard lock(mutex_);
739  assert(init_);
740 
741  // Only the application local node store can be imported
742  if (&source != &app_.getNodeStore())
743  {
744  assert(false);
745  JLOG(j_.error()) << "invalid source database";
746  return;
747  }
748 
749  std::uint32_t earliestIndex;
750  std::uint32_t latestIndex;
751  {
752  auto loadLedger = [&](bool ascendSort =
753  true) -> boost::optional<std::uint32_t> {
754  std::shared_ptr<Ledger> ledger;
755  std::uint32_t ledgerSeq;
756  std::tie(ledger, ledgerSeq, std::ignore) = loadLedgerHelper(
757  "WHERE LedgerSeq >= " +
759  " order by LedgerSeq " + (ascendSort ? "asc" : "desc") +
760  " limit 1",
761  app_,
762  false);
763  if (!ledger || ledgerSeq == 0)
764  {
765  JLOG(j_.error()) << "no suitable ledgers were found in"
766  " the SQLite database to import";
767  return boost::none;
768  }
769  return ledgerSeq;
770  };
771 
772  // Find earliest ledger sequence stored
773  auto ledgerSeq{loadLedger()};
774  if (!ledgerSeq)
775  return;
776  earliestIndex = seqToShardIndex(*ledgerSeq);
777 
778  // Consider only complete shards
779  if (ledgerSeq != firstLedgerSeq(earliestIndex))
780  ++earliestIndex;
781 
782  // Find last ledger sequence stored
783  ledgerSeq = loadLedger(false);
784  if (!ledgerSeq)
785  return;
786  latestIndex = seqToShardIndex(*ledgerSeq);
787 
788  // Consider only complete shards
789  if (ledgerSeq != lastLedgerSeq(latestIndex))
790  --latestIndex;
791 
792  if (latestIndex < earliestIndex)
793  {
794  JLOG(j_.error()) << "no suitable ledgers were found in"
795  " the SQLite database to import";
796  return;
797  }
798  }
799 
800  auto numHistShards = this->numHistoricalShards(lock);
801 
802  // Import the shards
803  for (std::uint32_t shardIndex = earliestIndex;
804  shardIndex <= latestIndex;
805  ++shardIndex)
806  {
807  auto const pathDesignation =
808  prepareForNewShard(shardIndex, numHistShards, lock);
809 
810  if (!pathDesignation)
811  break;
812 
813  auto const needsHistoricalPath =
814  *pathDesignation == PathDesignation::historical;
815 
816  // Skip if being acquired
817  if (shardIndex == acquireIndex_)
818  {
819  JLOG(j_.debug())
820  << "shard " << shardIndex << " already being acquired";
821  continue;
822  }
823 
824  // Skip if being imported
825  if (preparedIndexes_.find(shardIndex) != preparedIndexes_.end())
826  {
827  JLOG(j_.debug())
828  << "shard " << shardIndex << " already being imported";
829  continue;
830  }
831 
832  // Skip if stored
833  if (shards_.find(shardIndex) != shards_.end())
834  {
835  JLOG(j_.debug()) << "shard " << shardIndex << " already stored";
836  continue;
837  }
838 
839  // Verify SQLite ledgers are in the node store
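// Checking every ledger here would be expensive, so only one ledger
// hash out of every 256 is fetched as a spot check; any miss means the
// SQLite ledger index and the node store disagree and the shard is
// skipped.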
840  {
841  auto const firstSeq{firstLedgerSeq(shardIndex)};
842  auto const lastSeq{
843  std::max(firstSeq, lastLedgerSeq(shardIndex))};
844  auto const numLedgers{
845  shardIndex == earliestShardIndex() ? lastSeq - firstSeq + 1
846  : ledgersPerShard_};
847  auto ledgerHashes{getHashesByIndex(firstSeq, lastSeq, app_)};
848  if (ledgerHashes.size() != numLedgers)
849  continue;
850 
851  bool valid{true};
852  for (std::uint32_t n = firstSeq; n <= lastSeq; n += 256)
853  {
854  if (!source.fetchNodeObject(ledgerHashes[n].first, n))
855  {
856  JLOG(j_.warn()) << "SQLite ledger sequence " << n
857  << " mismatches node store";
858  valid = false;
859  break;
860  }
861  }
862  if (!valid)
863  continue;
864  }
865 
866  auto const path =
867  needsHistoricalPath ? chooseHistoricalPath(lock) : dir_;
868 
869  // Create the new shard
870  auto shard{
871  std::make_unique<Shard>(app_, *this, shardIndex, path, j_)};
872  if (!shard->init(scheduler_, *ctx_))
873  continue;
874 
875  // Create a marker file to signify an import in progress
876  auto const shardDir{path / std::to_string(shardIndex)};
877  auto const markerFile{shardDir / importMarker_};
878  {
879  std::ofstream ofs{markerFile.string()};
880  if (!ofs.is_open())
881  {
882  JLOG(j_.error()) << "shard " << shardIndex
883  << " failed to create temp marker file";
884  shard->removeOnDestroy();
885  continue;
886  }
887  ofs.close();
888  }
889 
890  // Copy the ledgers from node store
891  std::shared_ptr<Ledger> recentStored;
892  boost::optional<uint256> lastLedgerHash;
893 
894  while (auto const ledgerSeq = shard->prepare())
895  {
896  auto ledger{loadByIndex(*ledgerSeq, app_, false)};
897  if (!ledger || ledger->info().seq != ledgerSeq)
898  break;
899 
900  auto const result{shard->storeLedger(ledger, recentStored)};
901  storeStats(result.count, result.size);
902  if (result.error)
903  break;
904 
905  if (!shard->setLedgerStored(ledger))
906  break;
907 
908  if (!lastLedgerHash && ledgerSeq == lastLedgerSeq(shardIndex))
909  lastLedgerHash = ledger->info().hash;
910 
911  recentStored = std::move(ledger);
912  }
913 
914  using namespace boost::filesystem;
915  bool success{false};
916  if (lastLedgerHash && shard->getState() == Shard::complete)
917  {
918  // Store shard final key
919  Serializer s;
920  s.add32(Shard::version);
921  s.add32(firstLedgerSeq(shardIndex));
922  s.add32(lastLedgerSeq(shardIndex));
923  s.addBitString(*lastLedgerHash);
924  auto const nodeObject{NodeObject::createObject(
925  hotUNKNOWN, std::move(s.modData()), Shard::finalKey)};
926 
927  if (shard->storeNodeObject(nodeObject))
928  {
929  try
930  {
931  // The import process is complete and the
932  // marker file is no longer required
933  remove_all(markerFile);
934 
935  JLOG(j_.debug()) << "shard " << shardIndex
936  << " was successfully imported";
937  finalizeShard(
938  shards_.emplace(shardIndex, std::move(shard))
939  .first->second,
940  true,
941  boost::none);
942  success = true;
943 
944  if (shardIndex < shardBoundaryIndex())
945  ++numHistShards;
946  }
947  catch (std::exception const& e)
948  {
949  JLOG(j_.fatal()) << "shard index " << shardIndex
950  << ". Exception caught in function "
951  << __func__ << ". Error: " << e.what();
952  }
953  }
954  }
955 
956  if (!success)
957  {
958  JLOG(j_.error())
959  << "shard " << shardIndex << " failed to import";
960  shard->removeOnDestroy();
961  }
962  }
963 
964  updateStatus(lock);
965  }
966 
967  setFileStats();
968 }
969 
970 std::int32_t
971 DatabaseShardImp::getWriteLoad() const
972 {
973  std::shared_ptr<Shard> shard;
974  {
975  std::lock_guard lock(mutex_);
976  assert(init_);
977 
978  auto const it{shards_.find(acquireIndex_)};
979  if (it == shards_.end())
980  return 0;
981  shard = it->second;
982  }
983 
984  return shard->getWriteLoad();
985 }
986 
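// Stores a single node object, which is accepted only if it belongs to
// the shard currently being acquired; objects for any other shard index
// are dropped with a trace log entry.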
987 void
988 DatabaseShardImp::store(
989  NodeObjectType type,
990  Blob&& data,
991  uint256 const& hash,
992  std::uint32_t ledgerSeq)
993 {
994  auto const shardIndex{seqToShardIndex(ledgerSeq)};
995  std::shared_ptr<Shard> shard;
996  {
997  std::lock_guard lock(mutex_);
998  if (shardIndex != acquireIndex_)
999  {
1000  JLOG(j_.trace())
1001  << "shard " << shardIndex << " is not being acquired";
1002  return;
1003  }
1004 
1005  auto const it{shards_.find(shardIndex)};
1006  if (it == shards_.end())
1007  {
1008  JLOG(j_.error())
1009  << "shard " << shardIndex << " is not being acquired";
1010  return;
1011  }
1012  shard = it->second;
1013  }
1014 
1015  auto const nodeObject{
1016  NodeObject::createObject(type, std::move(data), hash)};
1017  if (shard->storeNodeObject(nodeObject))
1018  storeStats(1, nodeObject->getData().size());
1019 }
1020 
1021 bool
1022 DatabaseShardImp::storeLedger(std::shared_ptr<Ledger const> const& srcLedger)
1023 {
1024  auto const ledgerSeq{srcLedger->info().seq};
1025  auto const shardIndex{seqToShardIndex(ledgerSeq)};
1026  std::shared_ptr<Shard> shard;
1027  {
1028  std::lock_guard lock(mutex_);
1029  assert(init_);
1030 
1031  if (shardIndex != acquireIndex_)
1032  {
1033  JLOG(j_.trace())
1034  << "shard " << shardIndex << " is not being acquired";
1035  return false;
1036  }
1037 
1038  auto const it{shards_.find(shardIndex)};
1039  if (it == shards_.end())
1040  {
1041  JLOG(j_.error())
1042  << "shard " << shardIndex << " is not being acquired";
1043  return false;
1044  }
1045  shard = it->second;
1046  }
1047 
1048  auto const result{shard->storeLedger(srcLedger, nullptr)};
1049  storeStats(result.count, result.size);
1050  if (result.error || result.count == 0 || result.size == 0)
1051  return false;
1052 
1053  return setStoredInShard(shard, srcLedger);
1054 }
1055 
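// Sweeps each open shard's caches and enforces openFinalLimit_ by
// closing the least recently used finalized shards first.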
1056 void
1057 DatabaseShardImp::sweep()
1058 {
1059  std::vector<std::weak_ptr<Shard>> shards;
1060  {
1061  std::lock_guard lock(mutex_);
1062  assert(init_);
1063 
1064  shards.reserve(shards_.size());
1065  for (auto const& e : shards_)
1066  shards.push_back(e.second);
1067  }
1068 
1069  std::vector<std::shared_ptr<Shard>> openFinals;
1070  openFinals.reserve(openFinalLimit_);
1071 
1072  for (auto const& e : shards)
1073  {
1074  if (auto const shard{e.lock()}; shard && shard->isOpen())
1075  {
1076  shard->sweep();
1077 
1078  if (shard->getState() == Shard::final)
1079  openFinals.emplace_back(std::move(shard));
1080  }
1081  }
1082 
1083  if (openFinals.size() > openFinalLimit_)
1084  {
1085  JLOG(j_.trace()) << "Open shards exceed configured limit of "
1086  << openFinalLimit_ << " by "
1087  << (openFinals.size() - openFinalLimit_);
1088 
1089  // Try to close enough shards to be within the limit.
1090  // Sort ascending on last use so the oldest are removed first.
1091  std::sort(
1092  openFinals.begin(),
1093  openFinals.end(),
1094  [&](std::shared_ptr<Shard> const& lhsShard,
1095  std::shared_ptr<Shard> const& rhsShard) {
1096  return lhsShard->getLastUse() < rhsShard->getLastUse();
1097  });
1098 
1099  for (auto it{openFinals.cbegin()};
1100  it != openFinals.cend() && openFinals.size() > openFinalLimit_;)
1101  {
1102  if ((*it)->tryClose())
1103  it = openFinals.erase(it);
1104  else
1105  ++it;
1106  }
1107  }
1108 }
1109 
1110 bool
1111 DatabaseShardImp::initConfig(std::lock_guard<std::mutex> const&)
1112 {
1113  auto fail = [j = j_](std::string const& msg) {
1114  JLOG(j.error()) << "[" << ConfigSection::shardDatabase() << "] " << msg;
1115  return false;
1116  };
1117 
1118  Config const& config{app_.config()};
1119  Section const& section{config.section(ConfigSection::shardDatabase())};
1120 
1121  {
1122  // The earliest ledger sequence defaults to XRP_LEDGER_EARLIEST_SEQ.
1123  // A custom earliest ledger sequence can be set through the
1124  // configuration file using the 'earliest_seq' field under the
1125  // 'node_db' and 'shard_db' stanzas. If specified, this field must
1126  // have a value greater than zero and be equally assigned in
1127  // both stanzas.
1128 
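//
// For example, the two stanzas below assign 'earliest_seq' equally
// (values are illustrative, not defaults):
//
//   [node_db]
//   type=NuDB
//   path=/var/lib/rippled/db/nudb
//   earliest_seq=32570
//
//   [shard_db]
//   type=NuDB
//   path=/var/lib/rippled/db/shards/nudb
//   earliest_seq=32570
//   max_historical_shards=50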
1129  std::uint32_t shardDBEarliestSeq{0};
1130  get_if_exists<std::uint32_t>(
1131  section, "earliest_seq", shardDBEarliestSeq);
1132 
1133  std::uint32_t nodeDBEarliestSeq{0};
1134  get_if_exists<std::uint32_t>(
1135  config.section(ConfigSection::nodeDatabase()),
1136  "earliest_seq",
1137  nodeDBEarliestSeq);
1138 
1139  if (shardDBEarliestSeq != nodeDBEarliestSeq)
1140  {
1141  return fail(
1142  "and [" + ConfigSection::nodeDatabase() +
1143  "] define different 'earliest_seq' values");
1144  }
1145  }
1146 
1147  using namespace boost::filesystem;
1148  if (!get_if_exists<path>(section, "path", dir_))
1149  return fail("'path' missing");
1150 
1151  {
1152  get_if_exists(section, "max_historical_shards", maxHistoricalShards_);
1153 
1154  Section const& historicalShardPaths =
1155  config.section(SECTION_HISTORICAL_SHARD_PATHS);
1156 
1157  auto values = historicalShardPaths.values();
1158 
1159  std::sort(values.begin(), values.end());
1160  values.erase(std::unique(values.begin(), values.end()), values.end());
1161 
1162  for (auto const& s : values)
1163  {
1164  auto const dir = path(s);
1165  if (dir_ == dir)
1166  {
1167  return fail(
1168  "the 'path' cannot also be in the "
1169  "'historical_shard_path' section");
1170  }
1171 
1172  historicalPaths_.push_back(dir);
1173  }
1174  }
1175 
1176  if (section.exists("ledgers_per_shard"))
1177  {
1178  // To be set only in standalone for testing
1179  if (!config.standalone())
1180  return fail("'ledgers_per_shard' only honored in stand alone");
1181 
1182  ledgersPerShard_ = get<std::uint32_t>(section, "ledgers_per_shard");
1183  if (ledgersPerShard_ == 0 || ledgersPerShard_ % 256 != 0)
1184  return fail("'ledgers_per_shard' must be a multiple of 256");
1185 
1186  earliestShardIndex_ = seqToShardIndex(earliestLedgerSeq());
1187  avgShardFileSz_ = ledgersPerShard_ * kilobytes(192);
1188 }
1189 
1190  // NuDB is the default and only supported permanent storage backend
1191  backendName_ = get<std::string>(section, "type", "nudb");
1192  if (!boost::iequals(backendName_, "NuDB"))
1193  return fail("'type' value unsupported");
1194 
1195  return true;
1196 }
1197 
1198 std::shared_ptr<NodeObject>
1199 DatabaseShardImp::fetchNodeObject(
1200  uint256 const& hash,
1201  std::uint32_t ledgerSeq,
1202  FetchReport& fetchReport)
1203 {
1204  auto const shardIndex{seqToShardIndex(ledgerSeq)};
1205  std::shared_ptr<Shard> shard;
1206  {
1207  std::lock_guard lock(mutex_);
1208  auto const it{shards_.find(shardIndex)};
1209  if (it == shards_.end())
1210  return nullptr;
1211  shard = it->second;
1212  }
1213 
1214  return shard->fetchNodeObject(hash, fetchReport);
1215 }
1216 
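// Chooses which shard to acquire next. For a small or mostly full index
// space the available indexes are enumerated and one is picked at
// random; otherwise indexes are rejection-sampled. In the sampling
// branch at most half the space is taken, so each draw succeeds with
// probability of roughly at least 1/2 (ignoring indexes reserved in
// preparedIndexes_), and the chance of 30 straight misses is below
// 2^-30, i.e. under one in a billion; the loop runs 40 times for margin.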
1217 boost::optional<std::uint32_t>
1218 DatabaseShardImp::findAcquireIndex(
1219  std::uint32_t validLedgerSeq,
1220  std::lock_guard<std::mutex> const&)
1221 {
1222  if (validLedgerSeq < earliestLedgerSeq())
1223  return boost::none;
1224 
1225  auto const maxShardIndex{[this, validLedgerSeq]() {
1226  auto shardIndex{seqToShardIndex(validLedgerSeq)};
1227  if (validLedgerSeq != lastLedgerSeq(shardIndex))
1228  --shardIndex;
1229  return shardIndex;
1230  }()};
1231  auto const maxNumShards{maxShardIndex - earliestShardIndex() + 1};
1232 
1233  // Check if the shard store has all shards
1234  if (shards_.size() >= maxNumShards)
1235  return boost::none;
1236 
1237  if (maxShardIndex < 1024 ||
1238  static_cast<float>(shards_.size()) / maxNumShards > 0.5f)
1239  {
1240  // Small or mostly full index space to sample
1241  // Find the available indexes and select one at random
1242  std::vector<std::uint32_t> available;
1243  available.reserve(maxNumShards - shards_.size());
1244 
1245  for (auto shardIndex = earliestShardIndex();
1246  shardIndex <= maxShardIndex;
1247  ++shardIndex)
1248  {
1249  if (shards_.find(shardIndex) == shards_.end() &&
1250  preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
1251  {
1252  available.push_back(shardIndex);
1253  }
1254  }
1255 
1256  if (available.empty())
1257  return boost::none;
1258 
1259  if (available.size() == 1)
1260  return available.front();
1261 
1262  return available[rand_int(
1263  0u, static_cast<std::uint32_t>(available.size() - 1))];
1264  }
1265 
1266  // Large, sparse index space to sample
1267  // Keep choosing indexes at random until an available one is found
1268  // the chance of needing more than 30 attempts is less than 1 in a billion
1269  for (int i = 0; i < 40; ++i)
1270  {
1271  auto const shardIndex{rand_int(earliestShardIndex(), maxShardIndex)};
1272  if (shards_.find(shardIndex) == shards_.end() &&
1273  preparedIndexes_.find(shardIndex) == preparedIndexes_.end())
1274  {
1275  return shardIndex;
1276  }
1277  }
1278 
1279  assert(false);
1280  return boost::none;
1281 }
1282 
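// Schedules shard finalization on the task queue. The task validates
// the shard against the expected hash, updates the recent shard indexes
// (relocating outdated shards to historical paths as needed), and
// advertises the new shard index to peers unless running standalone or
// disconnected.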
1283 void
1284 DatabaseShardImp::finalizeShard(
1285  std::shared_ptr<Shard>& shard,
1286  bool writeSQLite,
1287  boost::optional<uint256> const& expectedHash)
1288 {
1289  taskQueue_->addTask([this,
1290  wptr = std::weak_ptr<Shard>(shard),
1291  writeSQLite,
1292  expectedHash]() {
1293  if (isStopping())
1294  return;
1295 
1296  auto shard{wptr.lock()};
1297  if (!shard)
1298  {
1299  JLOG(j_.debug()) << "Shard removed before being finalized";
1300  return;
1301  }
1302 
1303  if (!shard->finalize(writeSQLite, expectedHash))
1304  {
1305  if (isStopping())
1306  return;
1307 
1308  // Invalid or corrupt shard, remove it
1309  removeFailedShard(shard);
1310  return;
1311  }
1312 
1313  if (isStopping())
1314  return;
1315 
1316  {
1317  auto const boundaryIndex{shardBoundaryIndex()};
1318 
1319  std::lock_guard lock(mutex_);
1320  updateStatus(lock);
1321 
1322  if (shard->index() < boundaryIndex)
1323  {
1324  // This is a historical shard
1325  if (!historicalPaths_.empty() &&
1326  shard->getDir().parent_path() == dir_)
1327  {
1328  // Shard wasn't placed at a separate historical path
1329  JLOG(j_.warn()) << "shard " << shard->index()
1330  << " is not stored at a historical path";
1331  }
1332  }
1333 
1334  else
1335  {
1336  // Not a historical shard. Shift recent shards if necessary
1337  relocateOutdatedShards(lock);
1338  assert(!boundaryIndex || shard->index() - boundaryIndex <= 1);
1339 
1340  auto& recentShard = shard->index() == boundaryIndex
1341  ? secondLatestShardIndex_
1342  : latestShardIndex_;
1343 
1344  // Set the appropriate recent shard index
1345  recentShard = shard->index();
1346 
1347  if (shard->getDir().parent_path() != dir_)
1348  {
1349  JLOG(j_.warn()) << "shard " << shard->index()
1350  << " is not stored at the path";
1351  }
1352  }
1353  }
1354 
1355  setFileStats();
1356 
1357  // Update peers with new shard index
1358  if (!app_.config().standalone() &&
1359  app_.getOPs().getOperatingMode() != OperatingMode::DISCONNECTED)
1360  {
1361  protocol::TMPeerShardInfo message;
1362  PublicKey const& publicKey{app_.nodeIdentity().first};
1363  message.set_nodepubkey(publicKey.data(), publicKey.size());
1364  message.set_shardindexes(std::to_string(shard->index()));
1365  app_.overlay().foreach(send_always(std::make_shared<Message>(
1366  message, protocol::mtPEER_SHARD_INFO)));
1367  }
1368  });
1369 }
1370 
1371 void
1372 DatabaseShardImp::setFileStats()
1373 {
1374  std::vector<std::weak_ptr<Shard>> shards;
1375  {
1376  std::lock_guard lock(mutex_);
1377  if (shards_.empty())
1378  return;
1379 
1380  shards.reserve(shards_.size());
1381  for (auto const& e : shards_)
1382  shards.push_back(e.second);
1383  }
1384 
1385  std::uint64_t sumSz{0};
1386  std::uint32_t sumFd{0};
1387  std::uint32_t numShards{0};
1388  for (auto const& e : shards)
1389  {
1390  if (auto const shard{e.lock()}; shard)
1391  {
1392  auto const [sz, fd] = shard->getFileInfo();
1393  sumSz += sz;
1394  sumFd += fd;
1395  ++numShards;
1396  }
1397  }
1398 
1399  std::lock_guard lock(mutex_);
1400  fileSz_ = sumSz;
1401  fdRequired_ = sumFd;
1402  avgShardFileSz_ = (numShards == 0 ? fileSz_ : fileSz_ / numShards);
1403 
1404  if (!canAdd_)
1405  return;
1406 
1407  if (auto const count = numHistoricalShards(lock);
1408  count >= maxHistoricalShards_)
1409  {
1410  if (maxHistoricalShards_)
1411  {
1412  // In order to avoid excessive output, don't produce
1413  // this warning if the server isn't configured to
1414  // store historical shards.
1415  JLOG(j_.warn()) << "maximum number of historical shards reached";
1416  }
1417 
1418  canAdd_ = false;
1419  }
1420  else if (!sufficientStorage(
1421  maxHistoricalShards_ - count,
1422  PathDesignation::historical,
1423  lock))
1424  {
1425  JLOG(j_.warn())
1426  << "maximum shard store size exceeds available storage space";
1427 
1428  canAdd_ = false;
1429  }
1430 }
1431 
1432 void
1433 DatabaseShardImp::updateStatus(std::lock_guard<std::mutex> const&)
1434 {
1435  if (!shards_.empty())
1436  {
1437  RangeSet<std::uint32_t> rs;
1438  for (auto const& e : shards_)
1439  if (e.second->getState() == Shard::final)
1440  rs.insert(e.second->index());
1441  status_ = to_string(rs);
1442  }
1443  else
1444  status_.clear();
1445 }
1446 
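// Estimates whether 'numShards' more shards fit on disk by comparing
// the free space of the target path(s), in units of the average shard
// file size, against the requested count; historical shards may draw on
// the combined capacity of all configured historical paths.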
1447 bool
1448 DatabaseShardImp::sufficientStorage(
1449  std::uint32_t numShards,
1450  PathDesignation pathDesignation,
1451  std::lock_guard<std::mutex> const&) const
1452 {
1453  try
1454  {
1455  std::vector<std::uint64_t> capacities;
1456 
1457  if (pathDesignation == PathDesignation::historical &&
1458  !historicalPaths_.empty())
1459  {
1460  capacities.reserve(historicalPaths_.size());
1461 
1462  for (auto const& path : historicalPaths_)
1463  {
1464  // Get the available storage for each historical path
1465  auto const availableSpace =
1466  boost::filesystem::space(path).available;
1467 
1468  capacities.push_back(availableSpace);
1469  }
1470  }
1471  else
1472  {
1473  // Get the available storage for the main shard path
1474  capacities.push_back(boost::filesystem::space(dir_).available);
1475  }
1476 
1477  for (std::uint64_t const capacity : capacities)
1478  {
1479  // Leverage all the historical shard paths to
1480  // see if collectively they can fit the specified
1481  // number of shards. For this to work properly,
1482  // each historical path must correspond to a separate
1483  // physical device or filesystem.
1484 
1485  auto const shardCap = capacity / avgShardFileSz_;
1486  if (numShards <= shardCap)
1487  return true;
1488 
1489  numShards -= shardCap;
1490  }
1491  }
1492  catch (std::exception const& e)
1493  {
1494  JLOG(j_.fatal()) << "Exception caught in function " << __func__
1495  << ". Error: " << e.what();
1496  return false;
1497  }
1498 
1499  return false;
1500 }
1501 
1502 bool
1503 DatabaseShardImp::setStoredInShard(
1504  std::shared_ptr<Shard>& shard,
1505  std::shared_ptr<Ledger const> const& ledger)
1506 {
1507  if (!shard->setLedgerStored(ledger))
1508  {
1509  // Invalid or corrupt shard, remove it
1510  removeFailedShard(shard);
1511  return false;
1512  }
1513 
1514  if (shard->getState() == Shard::complete)
1515  {
1516  std::lock_guard lock(mutex_);
1517  if (auto const it{shards_.find(shard->index())}; it != shards_.end())
1518  {
1519  if (shard->index() == acquireIndex_)
1520  acquireIndex_ = 0;
1521 
1522  finalizeShard(it->second, false, boost::none);
1523  }
1524  else
1525  {
1526  JLOG(j_.debug())
1527  << "shard " << shard->index() << " is no longer being acquired";
1528  }
1529  }
1530 
1531  setFileStats();
1532  return true;
1533 }
1534 
1535 void
1536 DatabaseShardImp::removeFailedShard(std::shared_ptr<Shard>& shard)
1537 {
1538  {
1539  std::lock_guard lock(mutex_);
1540 
1541  if (shard->index() == acquireIndex_)
1542  acquireIndex_ = 0;
1543 
1544  if (shard->index() == latestShardIndex_)
1545  latestShardIndex_ = boost::none;
1546 
1547  if (shard->index() == secondLatestShardIndex_)
1548  secondLatestShardIndex_ = boost::none;
1549 
1550  if ((shards_.erase(shard->index()) > 0) &&
1551  shard->getState() == Shard::final)
1552  {
1553  updateStatus(lock);
1554  }
1555  }
1556 
1557  shard->removeOnDestroy();
1558 
1559  // Reset the shared_ptr to invoke the shard's
1560  // destructor and remove it from the server
1561  shard.reset();
1562  setFileStats();
1563 }
1564 
1565 std::uint32_t
1566 DatabaseShardImp::shardBoundaryIndex() const
1567 {
1568  auto const validIndex = app_.getLedgerMaster().getValidLedgerIndex();
1569 
1570  if (validIndex < earliestLedgerSeq())
1571  return 0;
1572 
1573  // Shards with an index earlier than the recent shard boundary index
1574  // are considered historical. The three shards at or later than
1575  // this index consist of the two most recently validated shards
1576  // and the shard still in the process of being built by live
1577  // transactions.
1578  return seqToShardIndex(validIndex) - 1;
1579 }
1580 
1581 std::uint32_t
1582 DatabaseShardImp::numHistoricalShards(
1583  std::lock_guard<std::mutex> const& lock) const
1584 {
1585  auto const boundaryIndex{shardBoundaryIndex()};
1586  return std::count_if(
1587  shards_.begin(), shards_.end(), [boundaryIndex](auto const& entry) {
1588  return entry.first < boundaryIndex;
1589  });
1590 }
1591 
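// Maintains the two "recent" shard slots (latestShardIndex_ and
// secondLatestShardIndex_) as the validated ledger advances. A shard
// that falls out of these slots either becomes a historical shard,
// moving to a historical path when one is configured, or is removed if
// historical limits or storage would be exceeded.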
1592 void
1593 DatabaseShardImp::relocateOutdatedShards(
1594  std::lock_guard<std::mutex> const& lock)
1595 {
1596  if (auto& cur = latestShardIndex_, &prev = secondLatestShardIndex_;
1597  cur || prev)
1598  {
1599  auto const latestShardIndex =
1600  seqToShardIndex(app_.getLedgerMaster().getValidLedgerIndex());
1601 
1602  auto const separateHistoricalPath = !historicalPaths_.empty();
1603 
1604  auto const removeShard =
1605  [this](std::uint32_t const shardIndex) -> void {
1606  canAdd_ = false;
1607 
1608  if (auto it = shards_.find(shardIndex); it != shards_.end())
1609  {
1610  if (it->second)
1611  removeFailedShard(it->second);
1612  else
1613  {
1614  JLOG(j_.warn()) << "can't find shard to remove";
1615  }
1616  }
1617  else
1618  {
1619  JLOG(j_.warn()) << "can't find shard to remove";
1620  }
1621  };
1622 
1623  auto const keepShard =
1624  [this, &lock, removeShard, separateHistoricalPath](
1625  std::uint32_t const shardIndex) -> bool {
1626  if (numHistoricalShards(lock) >= maxHistoricalShards_)
1627  {
1628  JLOG(j_.error())
1629  << "maximum number of historical shards reached";
1630 
1631  removeShard(shardIndex);
1632  return false;
1633  }
1634  if (separateHistoricalPath &&
1635  !sufficientStorage(1, PathDesignation::historical, lock))
1636  {
1637  JLOG(j_.error()) << "insufficient storage space available";
1638 
1639  removeShard(shardIndex);
1640  return false;
1641  }
1642 
1643  return true;
1644  };
1645 
1646  // Move a shard from the main shard path to a historical shard
1647  // path by copying the contents, and creating a new shard.
1648  auto const moveShard = [this,
1649  &lock](std::uint32_t const shardIndex) -> void {
1650  auto const dst = chooseHistoricalPath(lock);
1651 
1652  if (auto it = shards_.find(shardIndex); it != shards_.end())
1653  {
1654  auto& shard{it->second};
1655 
1656  // Close any open file descriptors before moving the shard
1657  // directory. Don't call removeOnDestroy since that would
1658  // attempt to close the fds after the directory has been moved.
1659  if (!shard->tryClose())
1660  {
1661  JLOG(j_.warn())
1662  << "can't close shard to move to historical path";
1663  return;
1664  }
1665 
1666  try
1667  {
1668  // Move the shard directory to the new path
1669  boost::filesystem::rename(
1670  shard->getDir().string(),
1671  dst / std::to_string(shardIndex));
1672  }
1673  catch (...)
1674  {
1675  JLOG(j_.error()) << "shard " << shardIndex
1676  << " failed to move to historical storage";
1677  return;
1678  }
1679 
1680  // Create a shard instance at the new location
1681  shard =
1682  std::make_shared<Shard>(app_, *this, shardIndex, dst, j_);
1683 
1684  // Open the new shard
1685  if (!shard->init(scheduler_, *ctx_))
1686  {
1687  JLOG(j_.error()) << "shard " << shardIndex
1688  << " failed to open in historical storage";
1689  shard->removeOnDestroy();
1690  shard.reset();
1691  }
1692  }
1693  else
1694  {
1695  JLOG(j_.warn())
1696  << "can't find shard to move to historical path";
1697  }
1698  };
1699 
1700  // See if either of the recent shards needs to be updated
1701  bool const curNotSynched =
1702  latestShardIndex_ && *latestShardIndex_ != latestShardIndex;
1703  bool const prevNotSynched = secondLatestShardIndex_ &&
1704  *secondLatestShardIndex_ != latestShardIndex - 1;
1705 
1706  // A new shard has been published. Move outdated
1707  // shards to historical storage as needed
1708  if (curNotSynched || prevNotSynched)
1709  {
1710  if (prev)
1711  {
1712  // Move the formerly second latest shard to historical storage
1713  if (keepShard(*prev) && separateHistoricalPath)
1714  {
1715  moveShard(*prev);
1716  }
1717 
1718  prev = boost::none;
1719  }
1720 
1721  if (cur)
1722  {
1723  // The formerly latest shard is now the second latest
1724  if (cur == latestShardIndex - 1)
1725  {
1726  prev = cur;
1727  }
1728 
1729  // The formerly latest shard is no longer a 'recent' shard
1730  else
1731  {
1732  // Move the formerly latest shard to historical storage
1733  if (keepShard(*cur) && separateHistoricalPath)
1734  {
1735  moveShard(*cur);
1736  }
1737  }
1738 
1739  cur = boost::none;
1740  }
1741  }
1742  }
1743 }
1744 
1745 auto
1746 DatabaseShardImp::prepareForNewShard(
1747  std::uint32_t shardIndex,
1748  std::uint32_t numHistoricalShards,
1749  std::lock_guard<std::mutex> const& lock) -> boost::optional<PathDesignation>
1750 {
1751  // Any shard earlier than the two most recent shards is a historical shard
1752  auto const boundaryIndex{shardBoundaryIndex()};
1753  auto const isHistoricalShard = shardIndex < boundaryIndex;
1754 
1755  auto const designation = isHistoricalShard && !historicalPaths_.empty()
1756  ? PathDesignation::historical
1757  : PathDesignation::none;
1758 
1759  // Check shard count and available storage space
1760  if (isHistoricalShard && numHistoricalShards >= maxHistoricalShards_)
1761  {
1762  JLOG(j_.error()) << "maximum number of historical shards reached";
1763  canAdd_ = false;
1764  return boost::none;
1765  }
1766  if (!sufficientStorage(1, designation, lock))
1767  {
1768  JLOG(j_.error()) << "insufficient storage space available";
1769  canAdd_ = false;
1770  return boost::none;
1771  }
1772 
1773  return designation;
1774 }
1775 
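// Picks one historical path uniformly at random, via std::sample, from
// those with at least an average shard's worth of free space.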
1776 boost::filesystem::path
1777 DatabaseShardImp::chooseHistoricalPath(std::lock_guard<std::mutex> const&) const
1778 {
1779  // If not configured with separate historical paths,
1780  // use the main path (dir_) by default.
1781  if (historicalPaths_.empty())
1782  return dir_;
1783 
1784  boost::filesystem::path historicalShardPath;
1785  std::vector<boost::filesystem::path> potentialPaths;
1786 
1787  for (boost::filesystem::path const& path : historicalPaths_)
1788  {
1789  if (boost::filesystem::space(path).available >= avgShardFileSz_)
1790  potentialPaths.push_back(path);
1791  }
1792 
1793  if (potentialPaths.empty())
1794  {
1795  JLOG(j_.error()) << "failed to select a historical shard path";
1796  return "";
1797  }
1798 
1799  std::sample(
1800  potentialPaths.begin(),
1801  potentialPaths.end(),
1802  &historicalShardPath,
1803  1,
1804  default_prng());
1805 
1806  return historicalShardPath;
1807 }
1808 
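// sufficientStorage() sums free space across historical paths, so two
// paths backed by the same file system would be double counted. On
// Linux the paths' statvfs file system IDs must therefore be distinct;
// elsewhere only a heuristic warning based on identical free-space
// counts is possible.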
1809 bool
1810 DatabaseShardImp::checkHistoricalPaths() const
1811 {
1812 #if BOOST_OS_LINUX
1813  // Each historical shard path must correspond
1814  // to a directory on a distinct device or file system.
1815  // Currently, this constraint is enforced only on Linux.
1816  std::unordered_map<decltype(statvfs::f_fsid), std::vector<std::string>>
1817  filesystemIDs;
1818 
1819  for (auto const& path : historicalPaths_)
1820  {
1821  struct statvfs buffer;
1822  if (statvfs(path.c_str(), &buffer))
1823  {
1824  JLOG(j_.error())
1825  << "failed to acquire stats for 'historical_shard_path': "
1826  << path;
1827  return false;
1828  }
1829 
1830  filesystemIDs[buffer.f_fsid].push_back(path.string());
1831  }
1832 
1833  bool ret = true;
1834  for (auto const& entry : filesystemIDs)
1835  {
1836  // Check to see if any of the paths are stored on the same file system
1837  if (entry.second.size() > 1)
1838  {
1839  // Two or more historical storage paths
1840  // correspond to the same file system.
1841  JLOG(j_.error())
1842  << "The following paths correspond to the same filesystem: "
1843  << boost::algorithm::join(entry.second, ", ")
1844  << ". Each configured historical storage path should"
1845  " be on a unique device or filesystem.";
1846 
1847  ret = false;
1848  }
1849  }
1850 
1851  return ret;
1852 
1853 #else
1854  // The requirement that each historical storage path
1855  // corresponds to a distinct device or file system is
1856  // enforced only on Linux, so on other platforms
1857  // keep track of the available capacities for each
1858  // path. Issue a warning if we suspect any of the paths
1859  // may violate this requirement.
1860 
1861  // Map byte counts to each path that shares that byte count.
1862  std::unordered_map<std::uint64_t, std::vector<std::string>>
1863  uniqueCapacities(historicalPaths_.size());
1864 
1865  for (auto const& path : historicalPaths_)
1866  uniqueCapacities[boost::filesystem::space(path).available].push_back(
1867  path.string());
1868 
1869  for (auto const& entry : uniqueCapacities)
1870  {
1871  // Check to see if any paths have the same amount of available bytes.
1872  if (entry.second.size() > 1)
1873  {
1874  // Two or more historical storage paths may
1875  // correspond to the same device or file system.
1876  JLOG(j_.warn())
1877  << "Each of the following paths have " << entry.first
1878  << " bytes free, and may be located on the same device"
1879  " or file system: "
1880  << boost::algorithm::join(entry.second, ", ")
1881  << ". Each configured historical storage path should"
1882  " be on a unique device or file system.";
1883  }
1884  }
1885 #endif
1886 
1887  return true;
1888 }
1889 
1890 //------------------------------------------------------------------------------
1891 
1891 
1892 std::unique_ptr<DatabaseShard>
1893 make_ShardStore(
1894  Application& app,
1895  Stoppable& parent,
1896  Scheduler& scheduler,
1897  int readThreads,
1898  beast::Journal j)
1899 {
1900  // The shard store is optional. Future changes will require it.
1901  Section const& section{
1902  app.config().section(ConfigSection::shardDatabase())};
1903  if (section.empty())
1904  return nullptr;
1905 
1906  return std::make_unique<DatabaseShardImp>(
1907  app, parent, "ShardStore", scheduler, readThreads, j);
1908 }
1909 
1910 } // namespace NodeStore
1911 } // namespace ripple
ripple::NodeStore::make_ShardStore
std::unique_ptr< DatabaseShard > make_ShardStore(Application &app, Stoppable &parent, Scheduler &scheduler, int readThreads, beast::Journal j)
Definition: DatabaseShardImp.cpp:1893
beast::Journal::fatal
Stream fatal() const
Definition: Journal.h:339
ripple::SizedItem::openFinalLimit
@ openFinalLimit
ripple::Section
Holds a collection of configuration values.
Definition: BasicConfig.h:43
ripple::loadLedgerHelper
std::tuple< std::shared_ptr< Ledger >, std::uint32_t, uint256 > loadLedgerHelper(std::string const &sqlSuffix, Application &app, bool acquire)
Definition: Ledger.cpp:1196
ripple::Application
Definition: Application.h:102
ripple::NodeStore::DatabaseShardImp::earliestShardIndex_
std::uint32_t earliestShardIndex_
Definition: DatabaseShardImp.h:217
ripple::NodeStore::DatabaseShardImp::ledgersPerShard_
std::uint32_t ledgersPerShard_
Definition: DatabaseShardImp.h:214
ripple::hotUNKNOWN
@ hotUNKNOWN
Definition: NodeObject.h:33
ripple::NodeStore::DatabaseShardImp::lastLedgerSeq
std::uint32_t lastLedgerSeq(std::uint32_t shardIndex) const override
Calculates the last ledger sequence for a given shard index.
Definition: DatabaseShardImp.h:108
ripple::NodeStore::DatabaseShardImp::earliestShardIndex
std::uint32_t earliestShardIndex() const override
Definition: DatabaseShardImp.h:86
ripple::makeSlice
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:240
ripple::NodeStore::DatabaseShardImp::mutex_
std::mutex mutex_
Definition: DatabaseShardImp.h:172
ripple::NodeStore::DatabaseShardImp::prepareLedger
boost::optional< std::uint32_t > prepareLedger(std::uint32_t validLedgerSeq) override
Prepare to store a new ledger in the shard being acquired.
Definition: DatabaseShardImp.cpp:234
ripple::NodeStore::DatabaseShardImp::app_
Application & app_
Definition: DatabaseShardImp.h:170
ripple::NodeStore::DatabaseShardImp::storeLedger
bool storeLedger(std::shared_ptr< Ledger const > const &srcLedger) override
Store a ledger from a different database.
Definition: DatabaseShardImp.cpp:1022
ripple::NodeStore::Database
Persistency layer for NodeObject.
Definition: Database.h:50
ripple::NodeStore::Shard::acquire
static constexpr State acquire
Definition: Shard.h:60
std::string
STL class.
std::shared_ptr< Ledger >
ripple::loadByIndex
std::shared_ptr< Ledger > loadByIndex(std::uint32_t ledgerIndex, Application &app, bool acquire)
Definition: Ledger.cpp:1565
ripple::SizedItem
SizedItem
Definition: Config.h:48
std::exception
STL class.
std::stoul
T stoul(T... args)
ripple::Stoppable::stopped
void stopped()
Called by derived classes to indicate that the stoppable has stopped.
Definition: Stoppable.cpp:72
ripple::NodeStore::DatabaseShardImp::PathDesignation
PathDesignation
Definition: DatabaseShardImp.h:165
beast::Journal::trace
Stream trace() const
Severity stream access functions.
Definition: Journal.h:309
ripple::Serializer::modData
Blob & modData()
Definition: Serializer.h:176
std::vector::reserve
T reserve(T... args)
ripple::NodeStore::DatabaseShardImp::removePreShard
void removePreShard(std::uint32_t shardIndex) override
Remove a previously prepared shard index for import.
Definition: DatabaseShardImp.cpp:411
ripple::LedgerMaster::getValidLedgerIndex
LedgerIndex getValidLedgerIndex()
Definition: LedgerMaster.cpp:216
ripple::NodeStore::Database::fdRequired_
int fdRequired_
Definition: Database.h:240
ripple::NodeStore::DatabaseShardImp::fileSz_
std::uint64_t fileSz_
Definition: DatabaseShardImp.h:209
ripple::InboundLedger::Reason::GENERIC
@ GENERIC
std::vector
STL class.
std::set::find
T find(T... args)
ripple::ConfigSection::shardDatabase
static std::string shardDatabase()
Definition: ConfigSections.h:38
std::vector::size
T size(T... args)
ripple::NodeObjectType
NodeObjectType
The types of node objects.
Definition: NodeObject.h:32
ripple::CashFilter::none
@ none
ripple::NodeObject::createObject
static std::shared_ptr< NodeObject > createObject(NodeObjectType type, Blob &&data, uint256 const &hash)
Create an object from fields.
Definition: NodeObject.cpp:37
ripple::NodeStore::DatabaseShardImp::taskQueue_
std::unique_ptr< TaskQueue > taskQueue_
Definition: DatabaseShardImp.h:179
ripple::NodeStore::DatabaseShardImp::setStored
void setStored(std::shared_ptr< Ledger const > const &ledger) override
Notifies the database that the given ledger has been fully acquired and stored.
Definition: DatabaseShardImp.cpp:622
ripple::NodeStore::DatabaseShardImp::updateStatus
void updateStatus(std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:1433
ripple::NodeStore::DatabaseShardImp::secondLatestShardIndex_
boost::optional< std::uint32_t > secondLatestShardIndex_
Definition: DatabaseShardImp.h:236
std::set::emplace
T emplace(T... args)
beast::Journal::warn
Stream warn() const
Definition: Journal.h:327
std::lock_guard
STL class.
ripple::kilobytes
constexpr auto kilobytes(T value) noexcept
Definition: ByteUtilities.h:27
ripple::NetworkOPs::getOperatingMode
virtual OperatingMode getOperatingMode() const =0
ripple::NodeStore::FetchReport
Contains information about a fetch operation.
Definition: ripple/nodestore/Scheduler.h:32
ripple::LedgerMaster::walkHashBySeq
boost::optional< LedgerHash > walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason)
Walk to a ledger's hash using the skip list.
Definition: LedgerMaster.cpp:1717
ripple::NodeStore::DatabaseShardImp::finalizeShard
void finalizeShard(std::shared_ptr< Shard > &shard, bool writeSQLite, boost::optional< uint256 > const &expectedHash)
Definition: DatabaseShardImp.cpp:1284
boost
Definition: IPAddress.h:117
std::all_of
T all_of(T... args)
ripple::to_string
std::string to_string(ListDisposition disposition)
Definition: ValidatorList.cpp:45
ripple::NodeStore::Shard::finalKey
static const uint256 finalKey
Definition: Shard.h:205
ripple::NodeStore::Shard::complete
static constexpr State complete
Definition: Shard.h:61
ripple::deserializePrefixedHeader
LedgerInfo deserializePrefixedHeader(Slice data, bool hasHash)
Deserialize a ledger header (prefixed with 4 bytes) from a byte array.
Definition: InboundLedger.cpp:293
ripple::NodeStore::DatabaseShardImp::openFinalLimit_
const std::uint32_t openFinalLimit_
Definition: DatabaseShardImp.h:223
std::sort
T sort(T... args)
std::shared_ptr::reset
T reset(T... args)
ripple::SHAMapHash
Definition: SHAMapTreeNode.h:47
ripple::Application::getOPs
virtual NetworkOPs & getOPs()=0
ripple::NodeStore::DatabaseShardImp::sweep
void sweep() override
Remove expired entries from the positive and negative caches.
Definition: DatabaseShardImp.cpp:1057
ripple::Section::values
std::vector< std::string > const & values() const
Returns all the values in the section.
Definition: BasicConfig.h:76
std::string::clear
T clear(T... args)
ripple::send_always
Sends a message to all peers.
Definition: predicates.h:31
ripple::Stoppable::setParent
void setParent(Stoppable &parent)
Set the parent of this Stoppable.
Definition: Stoppable.cpp:43
ripple::NodeStore::DatabaseShardImp::PathDesignation::historical
@ historical
ripple::get_if_exists
bool get_if_exists(Section const &section, std::string const &name, T &v)
Definition: BasicConfig.h:347
ripple::NodeStore::Shard::version
static constexpr std::uint32_t version
Definition: Shard.h:200
std::tie
T tie(T... args)
std::vector::push_back
T push_back(T... args)
ripple::NodeStore::DatabaseShardImp::firstLedgerSeq
std::uint32_t firstLedgerSeq(std::uint32_t shardIndex) const override
Calculates the first ledger sequence for a given shard index.
Definition: DatabaseShardImp.h:99
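A worked example of the shard arithmetic, assuming the default 16384 ledgers per shard, the XRP Ledger's earliest sequence of 32570, and the division shown below:
    // seqToShardIndex(ledgerSeq) == (ledgerSeq - 1) / ledgersPerShard
    // firstLedgerSeq(shardIndex) == 1 + shardIndex * ledgersPerShard,
    //   except for the earliest shard, which starts at earliestLedgerSeq().
    //
    //   seqToShardIndex(32570) == 32569 / 16384 == 1
    //   firstLedgerSeq(2)      == 1 + 2 * 16384 == 32769
    //   seqToShardIndex(32769) == 32768 / 16384 == 2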
ripple::NodeStore::DatabaseShardImp::fetchNodeObject
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq, FetchReport &fetchReport) override
Definition: DatabaseShardImp.cpp:1199
ripple::NodeStore::DatabaseShardImp::avgShardFileSz_
std::uint64_t avgShardFileSz_
Definition: DatabaseShardImp.h:220
ripple::base_uint< 256 >
ripple::NodeStore::DatabaseShardImp::status_
std::string status_
Definition: DatabaseShardImp.h:197
std::sample
T sample(T... args)
ripple::NodeStore::DatabaseShardImp::getPreShards
std::string getPreShards() override
Get the shard indexes that have been prepared for import.
Definition: DatabaseShardImp.cpp:420
ripple::NodeStore::DatabaseShardImp::getWriteLoad
std::int32_t getWriteLoad() const override
Retrieve the estimated number of pending write operations.
Definition: DatabaseShardImp.cpp:971
ripple::NodeStore::TaskQueue
Definition: TaskQueue.h:32
ripple::Config::reporting
bool reporting() const
Definition: Config.h:270
ripple::OperatingMode::DISCONNECTED
@ DISCONNECTED
not ready to process requests
ripple::Stoppable
Provides an interface for starting and stopping.
Definition: Stoppable.h:201
ripple::NodeStore::DatabaseShardImp::chooseHistoricalPath
boost::filesystem::path chooseHistoricalPath(std::lock_guard< std::mutex > const &) const
Definition: DatabaseShardImp.cpp:1777
ripple::rand_int
std::enable_if_t< std::is_integral< Integral >::value && detail::is_engine< Engine >::value, Integral > rand_int(Engine &engine, Integral min, Integral max)
Return a uniformly distributed random integer.
Definition: ripple/basics/random.h:115
ripple::NodeStore::DatabaseShardImp::sufficientStorage
bool sufficientStorage(std::uint32_t numShards, PathDesignation pathDesignation, std::lock_guard< std::mutex > const &) const
Definition: DatabaseShardImp.cpp:1448
ripple::NodeStore::DatabaseShardImp::init_
bool init_
Definition: DatabaseShardImp.h:173
ripple::NodeStore::DatabaseShardImp::getCompleteShards
std::string getCompleteShards() override
Query which complete shards are stored.
Definition: DatabaseShardImp.cpp:684
ripple::NodeStore::DatabaseShardImp::seqToShardIndex
std::uint32_t seqToShardIndex(std::uint32_t ledgerSeq) const override
Calculates the shard index for a given ledger sequence.
Definition: DatabaseShardImp.h:92
ripple::Application::getLedgerMaster
virtual LedgerMaster & getLedgerMaster()=0
ripple::PublicKey
A public key.
Definition: PublicKey.h:59
ripple::Config
Definition: Config.h:67
std::ofstream
STL class.
ripple::Application::config
virtual Config & config()=0
ripple::NodeStore::DatabaseShardImp::dir_
boost::filesystem::path dir_
Definition: DatabaseShardImp.h:191
ripple::Config::standalone
bool standalone() const
Definition: Config.h:265
ripple::NodeStore::DatabaseShardImp::removeFailedShard
void removeFailedShard(std::shared_ptr< Shard > &shard)
Definition: DatabaseShardImp.cpp:1536
ripple::Application::nodeIdentity
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
ripple::NodeStore::DatabaseShard
A collection of historical shards.
Definition: DatabaseShard.h:37
std::to_string
T to_string(T... args)
ripple::NodeStore::DatabaseShardImp::importShard
bool importShard(std::uint32_t shardIndex, boost::filesystem::path const &srcDir) override
Import a shard into the shard database.
Definition: DatabaseShardImp.cpp:438
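A hedged sketch; the path and index are hypothetical, and the index must have been reserved through prepareShards first:
    boost::filesystem::path const srcDir{"/tmp/shards/5"};  // hypothetical
    if (!app.getShardStore()->importShard(5, srcDir))
    {
        // Rejected: bad index, not prepared, or the shard failed validation.
    }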
ripple::default_prng
beast::xor_shift_engine & default_prng()
Return the default random engine.
Definition: ripple/basics/random.h:65
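Combined with rand_int above, this is how a caller can pick a random element, much as findAcquireIndex picks a shard to acquire (the candidate vector is hypothetical):
    std::vector<std::uint32_t> candidates{2, 5, 9};
    auto const pick = candidates[rand_int(
        default_prng(), std::size_t{0}, candidates.size() - 1)];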
ripple::NodeStore::DatabaseShardImp::store
void store(NodeObjectType type, Blob &&data, uint256 const &hash, std::uint32_t ledgerSeq) override
Store the object.
Definition: DatabaseShardImp.cpp:988
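A storing sketch; the node type and bytes are placeholders:
    // 'ledgerSeq' routes the object to the shard that owns that ledger.
    Blob data{/* serialized node bytes */};
    uint256 const hash = sha512Half(makeSlice(data));  // assumed key derivation
    db.store(hotACCOUNT_NODE, std::move(data), hash, ledgerSeq);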
ripple::NodeStore::DatabaseShardImp::PathDesignation::none
@ none
beast::Journal::error
Stream error() const
Definition: Journal.h:333
ripple::NodeStore::DatabaseShardImp::checkHistoricalPaths
bool checkHistoricalPaths() const
Definition: DatabaseShardImp.cpp:1810
std::set::erase
T erase(T... args)
ripple::NodeStore::DatabaseShardImp::initConfig
bool initConfig(std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:1111
ripple::ConfigSection
Definition: ConfigSections.h:28
ripple::NodeStore::DatabaseShardImp::latestShardIndex_
boost::optional< std::uint32_t > latestShardIndex_
Definition: DatabaseShardImp.h:235
beast::Journal
A generic endpoint for log messages.
Definition: Journal.h:58
ripple::NodeStore::seqToShardIndex
constexpr std::uint32_t seqToShardIndex(std::uint32_t ledgerSeq, std::uint32_t ledgersPerShard=DatabaseShard::ledgersPerShardDefault)
Definition: DatabaseShard.h:183
std::uint32_t
ripple::NodeStore::Database::earliestLedgerSeq
std::uint32_t earliestLedgerSeq() const
Definition: Database.h:232
ripple::NodeStore::DatabaseShardImp::setFileStats
void setFileStats()
Definition: DatabaseShardImp.cpp:1372
ripple::NodeStore::DatabaseShardImp::acquireIndex_
std::uint32_t acquireIndex_
Definition: DatabaseShardImp.h:188
ripple::Overlay::foreach
void foreach(Function f) const
Visit every active peer.
Definition: Overlay.h:178
ripple::NodeStore::Scheduler
Scheduling for asynchronous backend activity.
Definition: ripple/nodestore/Scheduler.h:60
ripple::NodeStore::Database::fetchNodeObject
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq=0, FetchType fetchType=FetchType::synchronous)
Fetch a node object.
Definition: Database.cpp:145
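A synchronous fetch sketch:
    // ledgerSeq lets the shard database locate the owning shard; a null
    // return means the object is not stored (or not yet available).
    if (auto const obj = db.fetchNodeObject(hash, ledgerSeq))
    {
        // obj->getData() holds the stored bytes.
    }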
std::transform
T transform(T... args)
ripple::NodeStore::Database::storeStats
void storeStats(std::uint64_t count, std::uint64_t sz)
Definition: Database.h:249
ripple::NodeStore::DatabaseShardImp::preparedIndexes_
std::set< std::uint32_t > preparedIndexes_
Definition: DatabaseShardImp.h:185
ripple::NodeStore::DatabaseShardImp::init
bool init() override
Initialize the database.
Definition: DatabaseShardImp.cpp:73
ripple::NodeStore::Shard::final
static constexpr State final
Definition: Shard.h:63
std::weak_ptr
STL class.
ripple::Serializer
Definition: Serializer.h:39
ripple::NodeStore::Database::stopReadThreads
void stopReadThreads()
Definition: Database.cpp:78
ripple::NodeStore::DatabaseShardImp::historicalPaths_
std::vector< boost::filesystem::path > historicalPaths_
Definition: DatabaseShardImp.h:206
ripple::NodeStore::DatabaseShardImp::parent_
Stoppable & parent_
Definition: DatabaseShardImp.h:171
ripple::NodeStore::DatabaseShardImp::findAcquireIndex
boost::optional< std::uint32_t > findAcquireIndex(std::uint32_t validLedgerSeq, std::lock_guard< std::mutex > const &)
Definition: DatabaseShardImp.cpp:1218
ripple::NodeStore::DatabaseShardImp::importMarker_
static constexpr auto importMarker_
Definition: DatabaseShardImp.h:226
std::vector::emplace_back
T emplace_back(T... args)
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::Serializer::addBitString
int addBitString(base_uint< Bits, Tag > const &v)
Definition: Serializer.h:97
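A sketch of building a small fixed-layout record such as a shard's final key blob; the field order here is an assumption for illustration, not the normative layout:
    Serializer s;
    s.add32(Shard::version);
    s.add32(firstLedgerSeq);
    s.add32(lastLedgerSeq);
    s.addBitString(lastLedgerHash);  // append the 256-bit hash verbatim
    // s.getData() now holds the serialized record.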
ripple::Application::getNodeStore
virtual NodeStore::Database & getNodeStore()=0
ripple::NodeStore::DatabaseShardImp::import
void import(Database &source) override
Import the application's local node store.
Definition: DatabaseShardImp.cpp:735
ripple::NodeStore::DatabaseShardImp::maxHistoricalShards_
std::uint32_t maxHistoricalShards_
Definition: DatabaseShardImp.h:203
ripple::Application::getShardFamily
virtual Family * getShardFamily()=0
ripple::NodeStore::Database::j_
const beast::Journal j_
Definition: Database.h:238
ripple::NodeStore::DatabaseShardImp::fetchLedger
std::shared_ptr< Ledger > fetchLedger(uint256 const &hash, std::uint32_t ledgerSeq) override
Fetch a ledger from the shard store.
Definition: DatabaseShardImp.cpp:544
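A hedged lookup sketch:
    // Returns nullptr when the shard holding 'ledgerSeq' is absent or the
    // ledger cannot be assembled from it.
    if (auto shardStore = app.getShardStore())
    {
        if (auto const ledger = shardStore->fetchLedger(hash, ledgerSeq))
        {
            // Ledger was loaded entirely from the shard store.
        }
    }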
std::vector::begin
T begin(T... args)
std
STL namespace.
ripple::NodeStore::DatabaseShardImp::numHistoricalShards
std::uint32_t numHistoricalShards(std::lock_guard< std::mutex > const &lock) const
Definition: DatabaseShardImp.cpp:1582
ripple::LedgerMaster::getCurrentLedgerIndex
LedgerIndex getCurrentLedgerIndex()
Definition: LedgerMaster.cpp:210
ripple::NodeStore::DatabaseShardImp::relocateOutdatedShards
void relocateOutdatedShards(std::lock_guard< std::mutex > const &lock)
Definition: DatabaseShardImp.cpp:1593
ripple::NodeStore::DatabaseShardImp::onChildrenStopped
void onChildrenStopped() override
Override called when all children have stopped.
Definition: DatabaseShardImp.cpp:706
ripple::Application::overlay
virtual Overlay & overlay()=0
ripple::NodeStore::DatabaseShardImp::shardBoundaryIndex
std::uint32_t shardBoundaryIndex() const
Definition: DatabaseShardImp.cpp:1566
std::count_if
T count_if(T... args)
std::vector::empty
T empty(T... args)
ripple::NodeStore::DatabaseShardImp::prepareShards
bool prepareShards(std::vector< std::uint32_t > const &shardIndexes) override
Prepare one or more shard indexes to be imported into the database.
Definition: DatabaseShardImp.cpp:302
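A sketch pairing this with getPreShards above; the indexes are hypothetical:
    std::vector<std::uint32_t> const indexes{5, 6};
    if (db.prepareShards(indexes))
    {
        // getPreShards() now reports the reserved indexes, e.g. "5-6".
    }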
std::unique
T unique(T... args)
beast::Journal::debug
Stream debug() const
Definition: Journal.h:315
ripple::getHashesByIndex
bool getHashesByIndex(std::uint32_t ledgerIndex, uint256 &ledgerHash, uint256 &parentHash, Application &app)
Definition: Ledger.cpp:1633
ripple::NodeStore::DatabaseShardImp::setStoredInShard
bool setStoredInShard(std::shared_ptr< Shard > &shard, std::shared_ptr< Ledger const > const &ledger)
Definition: DatabaseShardImp.cpp:1503
ripple::NodeStore::DatabaseShardImp::canAdd_
bool canAdd_
Definition: DatabaseShardImp.h:194
ripple::Serializer::add32
int add32(std::uint32_t i)
Definition: Serializer.cpp:38
std::vector::end
T end(T... args)
ripple::NodeStore::Database::scheduler_
Scheduler & scheduler_
Definition: Database.h:239
ripple::RangeSet
boost::icl::interval_set< T, std::less, ClosedInterval< T > > RangeSet
A set of closed intervals over the domain T.
Definition: RangeSet.h:69
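A small interval-set sketch, e.g. for tracking which ledgers of a shard have been stored (the sequence numbers are hypothetical):
    RangeSet<std::uint32_t> stored;
    stored.insert(range(32570u, 32580u));                    // closed interval
    bool const have = boost::icl::contains(stored, 32575u);  // true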
std::max
T max(T... args)
ripple::NodeStore::DatabaseShardImp::shards_
std::unordered_map< std::uint32_t, std::shared_ptr< Shard > > shards_
Definition: DatabaseShardImp.h:182
ripple::NodeStore::DatabaseShardImp::DatabaseShardImp
DatabaseShardImp()=delete
std::unique_ptr
STL class.
std::unordered_map
STL class.
ripple::PublisherStatus::available
@ available
ripple::NodeStore::DatabaseShardImp::prepareForNewShard
boost::optional< PathDesignation > prepareForNewShard(std::uint32_t shardIndex, std::uint32_t numHistoricalShards, std::lock_guard< std::mutex > const &lock)
Definition: DatabaseShardImp.cpp:1746
ripple::ConfigSection::nodeDatabase
static std::string nodeDatabase()
Definition: ConfigSections.h:33
std::exception::what
T what(T... args)
ripple::NodeStore::DatabaseShardImp::onStop
void onStop() override
Override called when the stop notification is issued.
Definition: DatabaseShardImp.cpp:693
ripple::NodeStore::DatabaseShardImp::ctx_
std::unique_ptr< nudb::context > ctx_
Definition: DatabaseShardImp.h:176
ripple::BasicConfig::section
Section & section(std::string const &name)
Returns the section with the given name.
Definition: BasicConfig.cpp:138
ripple::Stoppable::isStopping
bool isStopping() const
Returns true if the stoppable should stop.
Definition: Stoppable.cpp:54
ripple::NodeStore::DatabaseShardImp::backendName_
std::string backendName_
Definition: DatabaseShardImp.h:200