DatabaseShardImp.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2017 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/app/ledger/InboundLedgers.h>
21 #include <ripple/app/ledger/LedgerMaster.h>
22 #include <ripple/app/misc/NetworkOPs.h>
23 #include <ripple/basics/ByteUtilities.h>
24 #include <ripple/basics/chrono.h>
25 #include <ripple/basics/random.h>
26 #include <ripple/core/ConfigSections.h>
27 #include <ripple/nodestore/DummyScheduler.h>
28 #include <ripple/nodestore/impl/DatabaseShardImp.h>
29 #include <ripple/overlay/Overlay.h>
30 #include <ripple/overlay/predicates.h>
31 #include <ripple/protocol/HashPrefix.h>
32 
33 #include <boost/algorithm/string/predicate.hpp>
34 
35 #if BOOST_OS_LINUX
36 #include <sys/statvfs.h>
37 #endif
38 
39 namespace ripple {
40 namespace NodeStore {
41 
42 DatabaseShardImp::DatabaseShardImp(
43  Application& app,
44  Stoppable& parent,
45  std::string const& name,
46  Scheduler& scheduler,
47  int readThreads,
48  beast::Journal j)
49  : DatabaseShard(
50  name,
51  parent,
52  scheduler,
53  readThreads,
54  app.config().section(ConfigSection::shardDatabase()),
55  j)
56  , app_(app)
57  , parent_(parent)
58  , taskQueue_(std::make_unique<TaskQueue>(*this))
59  , earliestShardIndex_(seqToShardIndex(earliestLedgerSeq()))
60  , avgShardFileSz_(ledgersPerShard_ * kilobytes(192))
61 {
62 }
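// Editorial note: avgShardFileSz_ is a sizing estimate rather than a
// measurement. It assumes roughly 192 KB of storage per ledger, so with
// the default 16384 ledgers per shard a shard is budgeted at about 3 GB;
// setFileStats() later recomputes the average from observed file sizes.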
63 
64 DatabaseShardImp::~DatabaseShardImp()
65 {
66  onStop();
67 }
68 
69 bool
70 DatabaseShardImp::init()
71 {
72  {
73  std::lock_guard lock(mutex_);
74  if (init_)
75  {
76  JLOG(j_.error()) << "already initialized";
77  return false;
78  }
79 
80  if (!initConfig(lock))
81  {
82  JLOG(j_.error()) << "invalid configuration file settings";
83  return false;
84  }
85 
86  try
87  {
88  using namespace boost::filesystem;
89 
90  // Consolidate the main storage path and all
91  // historical paths
92  std::vector<path> paths{dir_};
93  paths.insert(
94  paths.end(), historicalPaths_.begin(), historicalPaths_.end());
95 
96  for (auto const& path : paths)
97  {
98  try
99  {
100  if (exists(path))
101  {
102  if (!is_directory(path))
103  {
104  JLOG(j_.error()) << path << " must be a directory";
105  return false;
106  }
107  }
108  else if (!create_directories(path))
109  {
110  JLOG(j_.error())
111  << "failed to create path: " + path.string();
112  return false;
113  }
114  }
115  catch (...)
116  {
117  JLOG(j_.error())
118  << "failed to create path: " + path.string();
119  return false;
120  }
121  }
122 
123  if (!historicalPaths_.empty())
124  {
125  // Check historical paths for duplicated
126  // filesystems
127  if (!checkHistoricalPaths())
128  return false;
129  }
130 
131  ctx_ = std::make_unique<nudb::context>();
132  ctx_->start();
133 
134  // Find shards
135  for (auto const& path : paths)
136  {
137  for (auto const& d : directory_iterator(path))
138  {
139  if (!is_directory(d))
140  continue;
141 
142  auto const shardDir = d.path();
143 
144  // Check shard directory name is numeric
145  auto dirName = shardDir.stem().string();
146  if (!std::all_of(
147  dirName.begin(), dirName.end(), [](auto c) {
148  return ::isdigit(static_cast<unsigned char>(c));
149  }))
150  {
151  continue;
152  }
153 
154  auto const shardIndex{std::stoul(dirName)};
155  if (shardIndex < earliestShardIndex())
156  {
157  JLOG(j_.error())
158  << "shard " << shardIndex
159  << " comes before earliest shard index "
160  << earliestShardIndex();
161  return false;
162  }
163 
164  // Check if a previous import failed
165  if (is_regular_file(shardDir / importMarker_))
166  {
167  JLOG(j_.warn())
168  << "shard " << shardIndex
169  << " previously failed import, removing";
170  remove_all(shardDir);
171  continue;
172  }
173 
174  auto shard{std::make_unique<Shard>(
175  app_, *this, shardIndex, shardDir.parent_path(), j_)};
176  if (!shard->open(scheduler_, *ctx_))
177  {
178  // Remove corrupted or legacy shard
179  shard->removeOnDestroy();
180  JLOG(j_.warn())
181  << "shard " << shardIndex << " removed, "
182  << (shard->isLegacy() ? "legacy" : "corrupted")
183  << " shard";
184  continue;
185  }
186 
187  if (shard->isFinal())
188  {
189  shards_.emplace(
190  shardIndex,
191  ShardInfo(
192  std::move(shard), ShardInfo::State::final));
193  }
194  else if (shard->isBackendComplete())
195  {
196  auto const result{shards_.emplace(
197  shardIndex,
198  ShardInfo(
199  std::move(shard), ShardInfo::State::none))};
200  finalizeShard(
201  result.first->second, true, lock, boost::none);
202  }
203  else
204  {
205  if (acquireIndex_ != 0)
206  {
207  JLOG(j_.error())
208  << "more than one shard being acquired";
209  return false;
210  }
211 
212  shards_.emplace(
213  shardIndex,
214  ShardInfo(
215  std::move(shard), ShardInfo::State::acquire));
216  acquireIndex_ = shardIndex;
217  }
218  }
219  }
220  }
221  catch (std::exception const& e)
222  {
223  JLOG(j_.error())
224  << "exception " << e.what() << " in function " << __func__;
225  return false;
226  }
227 
228  updateStatus(lock);
229  setParent(parent);
230  init_ = true;
231  }
232 
233  setFileStats();
234  return true;
235 }
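// Editorial summary of init(): it runs once under mutex_, validates the
// configuration, creates the main and historical directories, then scans
// each path for numerically named shard directories. A shard that fails
// to open (corrupt or legacy) or that still carries an import marker from
// an interrupted import is removed; a complete but unfinalized shard is
// queued for finalization; and at most one partially acquired shard may
// resume, tracked by acquireIndex_.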
236 
237 boost::optional<std::uint32_t>
238 DatabaseShardImp::prepareLedger(std::uint32_t validLedgerSeq)
239 {
240  boost::optional<std::uint32_t> shardIndex;
241 
242  {
243  std::lock_guard lock(mutex_);
244  assert(init_);
245 
246  if (acquireIndex_ != 0)
247  {
248  if (auto it{shards_.find(acquireIndex_)}; it != shards_.end())
249  return it->second.shard->prepare();
250  assert(false);
251  return boost::none;
252  }
253 
254  if (!canAdd_)
255  return boost::none;
256 
257  shardIndex = findAcquireIndex(validLedgerSeq, lock);
258  }
259 
260  if (!shardIndex)
261  {
262  JLOG(j_.debug()) << "no new shards to add";
263  {
264  std::lock_guard lock(mutex_);
265  canAdd_ = false;
266  }
267  return boost::none;
268  }
269 
270  auto const pathDesignation = [this, shardIndex = *shardIndex]() {
271  std::lock_guard lock(mutex_);
272  return prepareForNewShard(shardIndex, numHistoricalShards(lock), lock);
273  }();
274 
275  if (!pathDesignation)
276  return boost::none;
277 
278  auto const needsHistoricalPath =
279  *pathDesignation == PathDesignation::historical;
280 
281  auto shard = [this, shardIndex, needsHistoricalPath] {
282  std::lock_guard lock(mutex_);
283  return std::make_unique<Shard>(
284  app_,
285  *this,
286  *shardIndex,
287  (needsHistoricalPath ? chooseHistoricalPath(lock) : ""),
288  j_);
289  }();
290 
291  if (!shard->open(scheduler_, *ctx_))
292  return boost::none;
293 
294  auto const seq{shard->prepare()};
295  {
296  std::lock_guard lock(mutex_);
297  shards_.emplace(
298  *shardIndex,
299  ShardInfo(std::move(shard), ShardInfo::State::acquire));
300  acquireIndex_ = *shardIndex;
301  }
302  return seq;
303 }
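// Editorial note: prepareLedger() drives acquisition. If a shard is
// already being acquired it asks that shard for its next missing ledger
// sequence; otherwise findAcquireIndex() picks an absent shard index at
// random, a Shard is opened on the appropriate path, and acquireIndex_
// records it so only one shard is acquired at a time.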
304 
305 bool
306 DatabaseShardImp::prepareShard(std::uint32_t shardIndex)
307 {
308  auto fail = [j = j_, shardIndex](std::string const& msg) {
309  JLOG(j.error()) << "shard " << shardIndex << " " << msg;
310  return false;
311  };
312  std::lock_guard lock(mutex_);
313  assert(init_);
314 
315  if (!canAdd_)
316  return fail("cannot be stored at this time");
317 
318  if (shardIndex < earliestShardIndex())
319  {
320  return fail(
321  "comes before earliest shard index " +
322  std::to_string(earliestShardIndex()));
323  }
324 
325  // If we are synced to the network, check if the shard index
326  // is greater or equal to the current shard.
327  auto seqCheck = [&](std::uint32_t seq) {
328  // seq will be greater than zero if valid
329  if (seq >= earliestLedgerSeq() && shardIndex >= seqToShardIndex(seq))
330  return fail("has an invalid index");
331  return true;
332  };
333  if (!seqCheck(app_.getLedgerMaster().getValidLedgerIndex() + 1) ||
334  !seqCheck(app_.getLedgerMaster().getCurrentLedgerIndex()))
335  {
336  return false;
337  }
338 
339  if (shards_.find(shardIndex) != shards_.end())
340  {
341  JLOG(j_.debug()) << "shard " << shardIndex
342  << " is already stored or queued for import";
343  return false;
344  }
345 
346  // Any shard earlier than the two most recent shards
347  // is a historical shard
348  bool const isHistoricalShard = shardIndex < shardBoundaryIndex(lock);
349  auto const numHistShards = numHistoricalShards(lock);
350 
351  // Check shard count and available storage space
352  if (isHistoricalShard && numHistShards >= maxHistoricalShards_)
353  return fail("maximum number of historical shards reached");
354  if (!sufficientStorage(
355  1,
356  isHistoricalShard ? PathDesignation::historical
357  : PathDesignation::none,
358  lock))
359  return fail("insufficient storage space available");
360 
361  shards_.emplace(shardIndex, ShardInfo(nullptr, ShardInfo::State::import));
362  return true;
363 }
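// Editorial note: prepareShard() merely reserves an index for an external
// import (see importShard() below). It validates the index against the
// earliest shard, the ledgers the network is still building, storage
// limits, and duplicates, then records a placeholder ShardInfo whose
// shard pointer stays null until the import completes.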
364 
365 void
366 DatabaseShardImp::removePreShard(std::uint32_t shardIndex)
367 {
368  std::lock_guard lock(mutex_);
369  assert(init_);
370 
371  if (auto const it{shards_.find(shardIndex)};
372  it != shards_.end() && it->second.state == ShardInfo::State::import)
373  {
374  shards_.erase(it);
375  }
376 }
377 
377
378 std::string
379 DatabaseShardImp::getPreShards()
380 {
381  RangeSet<std::uint32_t> rs;
382  {
383  std::lock_guard lock(mutex_);
384  assert(init_);
385 
386  for (auto const& e : shards_)
387  if (e.second.state == ShardInfo::State::import)
388  rs.insert(e.first);
389  }
390 
391  if (rs.empty())
392  return {};
393 
394  return to_string(rs);
395 };
396 
397 bool
398 DatabaseShardImp::importShard(
399  std::uint32_t shardIndex,
400  boost::filesystem::path const& srcDir)
401 {
402  using namespace boost::filesystem;
403  try
404  {
405  if (!is_directory(srcDir) || is_empty(srcDir))
406  {
407  JLOG(j_.error()) << "invalid source directory " << srcDir.string();
408  return false;
409  }
410  }
411  catch (std::exception const& e)
412  {
413  JLOG(j_.error()) << "exception " << e.what() << " in function "
414  << __func__;
415  return false;
416  }
417 
418  auto expectedHash = app_.getLedgerMaster().walkHashBySeq(
419  lastLedgerSeq(shardIndex), InboundLedger::Reason::GENERIC);
420
421  if (!expectedHash)
422  {
423  JLOG(j_.error()) << "shard " << shardIndex
424  << " expected hash not found";
425  return false;
426  }
427 
428  auto renameDir = [&](path const& src, path const& dst) {
429  try
430  {
431  rename(src, dst);
432  }
433  catch (std::exception const& e)
434  {
435  JLOG(j_.error())
436  << "exception " << e.what() << " in function " << __func__;
437  return false;
438  }
439  return true;
440  };
441 
442  path dstDir;
443  {
444  std::lock_guard lock(mutex_);
445  assert(init_);
446 
447  // Check shard is prepared
448  if (auto const it{shards_.find(shardIndex)}; it == shards_.end() ||
449  it->second.shard || it->second.state != ShardInfo::State::import)
450  {
451  JLOG(j_.error()) << "shard " << shardIndex << " failed to import";
452  return false;
453  }
454 
455  auto const pathDesignation =
456  prepareForNewShard(shardIndex, numHistoricalShards(lock), lock);
457 
458  if (!pathDesignation)
459  {
460  JLOG(j_.error()) << "shard " << shardIndex << " failed to import";
461  return false;
462  }
463 
464  auto const needsHistoricalPath =
465  *pathDesignation == PathDesignation::historical;
466  dstDir = needsHistoricalPath ? chooseHistoricalPath(lock) : dir_;
467  }
468 
469  dstDir /= std::to_string(shardIndex);
470 
471  // Rename source directory to the shard database directory
472  if (!renameDir(srcDir, dstDir))
473  return false;
474 
475  // Create the new shard
476  auto shard{std::make_unique<Shard>(
477  app_, *this, shardIndex, dstDir.parent_path(), j_)};
478 
479  if (!shard->open(scheduler_, *ctx_) || !shard->isBackendComplete())
480  {
481  JLOG(j_.error()) << "shard " << shardIndex << " failed to import";
482  shard.reset();
483  renameDir(dstDir, srcDir);
484  return false;
485  }
486 
487  {
488  std::lock_guard lock(mutex_);
489  auto const it{shards_.find(shardIndex)};
490  if (it == shards_.end() || it->second.shard ||
491  it->second.state != ShardInfo::State::import)
492  {
493  JLOG(j_.error()) << "shard " << shardIndex << " failed to import";
494  shard.reset();
495  renameDir(dstDir, srcDir);
496  return false;
497  }
498 
499  it->second.shard = std::move(shard);
500  finalizeShard(it->second, true, lock, expectedHash);
501  }
502 
503  return true;
504 }
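// Illustrative caller sequence for the two-step import API (hypothetical,
// for exposition only; error handling elided):
//
//     if (db.prepareShard(shardIndex))              // reserve the index
//     {
//         if (!db.importShard(shardIndex, srcDir))  // move and finalize
//             db.removePreShard(shardIndex);        // release on failure
//     }
//
// importShard() renames srcDir into the shard store and reopens it there;
// on any failure the directory is renamed back, so the caller's source
// data survives a rejected import.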
505 
506 std::shared_ptr<Ledger>
507 DatabaseShardImp::fetchLedger(uint256 const& hash, std::uint32_t seq)
508 {
509  auto const shardIndex{seqToShardIndex(seq)};
510  {
511  std::shared_ptr<Shard> shard;
512  ShardInfo::State state;
513  {
514  std::lock_guard lock(mutex_);
515  assert(init_);
516 
517  if (auto const it{shards_.find(shardIndex)}; it != shards_.end())
518  {
519  shard = it->second.shard;
520  state = it->second.state;
521  }
522  else
523  return {};
524  }
525 
526  // Check if the ledger is stored in a final shard
527  // or in the shard being acquired
528  switch (state)
529  {
530  case ShardInfo::State::final:
531  break;
532  case ShardInfo::State::acquire:
533  if (shard->containsLedger(seq))
534  break;
535  [[fallthrough]];
536  default:
537  return {};
538  }
539  }
540 
541  auto nObj{fetch(hash, seq)};
542  if (!nObj)
543  return {};
544 
545  auto fail = [this, seq](std::string const& msg) -> std::shared_ptr<Ledger> {
546  JLOG(j_.error()) << "shard " << seqToShardIndex(seq) << " " << msg;
547  return {};
548  };
549 
550  auto ledger{std::make_shared<Ledger>(
551  deserializePrefixedHeader(makeSlice(nObj->getData())),
552  app_.config(),
553  *app_.getShardFamily())};
554 
555  if (ledger->info().seq != seq)
556  {
557  return fail(
558  "encountered invalid ledger sequence " + std::to_string(seq));
559  }
560  if (ledger->info().hash != hash)
561  {
562  return fail(
563  "encountered invalid ledger hash " + to_string(hash) +
564  " on sequence " + std::to_string(seq));
565  }
566 
567  ledger->setFull();
568  if (!ledger->stateMap().fetchRoot(
569  SHAMapHash{ledger->info().accountHash}, nullptr))
570  {
571  return fail(
572  "is missing root STATE node on hash " + to_string(hash) +
573  " on sequence " + std::to_string(seq));
574  }
575 
576  if (ledger->info().txHash.isNonZero())
577  {
578  if (!ledger->txMap().fetchRoot(
579  SHAMapHash{ledger->info().txHash}, nullptr))
580  {
581  return fail(
582  "is missing root TXN node on hash " + to_string(hash) +
583  " on sequence " + std::to_string(seq));
584  }
585  }
586  return ledger;
587 }
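// Editorial note: fetchLedger() serves only final shards and the shard
// being acquired. It deserializes the stored ledger header, then attaches
// the state and transaction SHAMap roots; the map contents themselves are
// fetched on demand through the shard family.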
588 
589 void
590 DatabaseShardImp::setStored(std::shared_ptr<Ledger const> const& ledger)
591 {
592  if (ledger->info().hash.isZero())
593  {
594  JLOG(j_.error()) << "zero ledger hash for ledger sequence "
595  << ledger->info().seq;
596  return;
597  }
598  if (ledger->info().accountHash.isZero())
599  {
600  JLOG(j_.error()) << "zero account hash for ledger sequence "
601  << ledger->info().seq;
602  return;
603  }
604  if (ledger->stateMap().getHash().isNonZero() &&
605  !ledger->stateMap().isValid())
606  {
607  JLOG(j_.error()) << "invalid state map for ledger sequence "
608  << ledger->info().seq;
609  return;
610  }
611  if (ledger->info().txHash.isNonZero() && !ledger->txMap().isValid())
612  {
613  JLOG(j_.error()) << "invalid transaction map for ledger sequence "
614  << ledger->info().seq;
615  return;
616  }
617 
618  auto const shardIndex{seqToShardIndex(ledger->info().seq)};
619  std::shared_ptr<Shard> shard;
620  {
621  std::lock_guard lock(mutex_);
622  assert(init_);
623 
624  if (shardIndex != acquireIndex_)
625  {
626  JLOG(j_.trace())
627  << "shard " << shardIndex << " is not being acquired";
628  return;
629  }
630 
631  if (auto const it{shards_.find(shardIndex)}; it != shards_.end())
632  shard = it->second.shard;
633  else
634  {
635  JLOG(j_.error())
636  << "shard " << shardIndex << " is not being acquired";
637  return;
638  }
639  }
640 
641  storeLedgerInShard(shard, ledger);
642 }
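// Editorial note: setStored() is the completion callback for a ledger
// acquired from the network. It hands the ledger to storeLedgerInShard(),
// which also triggers finalization once the shard's backend reports
// itself complete.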
643 
644 std::string
645 DatabaseShardImp::getCompleteShards()
646 {
647  std::lock_guard lock(mutex_);
648  assert(init_);
649 
650  return status_;
651 }
652 
653 void
654 DatabaseShardImp::validate()
655 {
656  std::vector<std::weak_ptr<Shard>> shards;
657  {
658  std::lock_guard lock(mutex_);
659  assert(init_);
660 
661  // Only shards with a state of final should be validated
662  for (auto& e : shards_)
663  if (e.second.state == ShardInfo::State::final)
664  shards.push_back(e.second.shard);
665 
666  if (shards.empty())
667  return;
668 
669  JLOG(j_.debug()) << "Validating shards " << status_;
670  }
671 
672  for (auto const& e : shards)
673  {
674  if (auto shard{e.lock()}; shard)
675  shard->finalize(true, boost::none);
676  }
677 
678  app_.getShardFamily()->reset();
679 }
680 
681 void
682 DatabaseShardImp::onStop()
683 {
684  // Stop read threads in base before data members are destroyed
685  stopThreads();
686 
687  std::lock_guard lock(mutex_);
688  if (shards_.empty())
689  return;
690 
691  // Notify and destroy shards
692  for (auto& e : shards_)
693  {
694  if (e.second.shard)
695  e.second.shard->stop();
696  }
697  shards_.clear();
698 }
699 
700 void
701 DatabaseShardImp::import(Database& source)
702 {
703  {
704  std::lock_guard lock(mutex_);
705  assert(init_);
706 
707  // Only the application local node store can be imported
708  if (&source != &app_.getNodeStore())
709  {
710  assert(false);
711  JLOG(j_.error()) << "invalid source database";
712  return;
713  }
714 
715  std::uint32_t earliestIndex;
716  std::uint32_t latestIndex;
717  {
718  auto loadLedger = [&](bool ascendSort =
719  true) -> boost::optional<std::uint32_t> {
720  std::shared_ptr<Ledger> ledger;
721  std::uint32_t seq;
722  std::tie(ledger, seq, std::ignore) = loadLedgerHelper(
723  "WHERE LedgerSeq >= " +
724  std::to_string(earliestLedgerSeq()) +
725  " order by LedgerSeq " + (ascendSort ? "asc" : "desc") +
726  " limit 1",
727  app_,
728  false);
729  if (!ledger || seq == 0)
730  {
731  JLOG(j_.error()) << "no suitable ledgers were found in"
732  " the SQLite database to import";
733  return boost::none;
734  }
735  return seq;
736  };
737 
738  // Find earliest ledger sequence stored
739  auto seq{loadLedger()};
740  if (!seq)
741  return;
742  earliestIndex = seqToShardIndex(*seq);
743 
744  // Consider only complete shards
745  if (seq != firstLedgerSeq(earliestIndex))
746  ++earliestIndex;
747 
748  // Find last ledger sequence stored
749  seq = loadLedger(false);
750  if (!seq)
751  return;
752  latestIndex = seqToShardIndex(*seq);
753 
754  // Consider only complete shards
755  if (seq != lastLedgerSeq(latestIndex))
756  --latestIndex;
757 
758  if (latestIndex < earliestIndex)
759  {
760  JLOG(j_.error()) << "no suitable ledgers were found in"
761  " the SQLite database to import";
762  return;
763  }
764  }
765 
766  auto numHistShards = this->numHistoricalShards(lock);
767 
768  // Import the shards
769  for (std::uint32_t shardIndex = earliestIndex;
770  shardIndex <= latestIndex;
771  ++shardIndex)
772  {
773  auto const pathDesignation =
774  prepareForNewShard(shardIndex, numHistShards, lock);
775 
776  if (!pathDesignation)
777  break;
778 
779  auto const needsHistoricalPath =
780  *pathDesignation == PathDesignation::historical;
781 
782  // Skip if already stored
783  if (shardIndex == acquireIndex_ ||
784  shards_.find(shardIndex) != shards_.end())
785  {
786  JLOG(j_.debug()) << "shard " << shardIndex << " already exists";
787  continue;
788  }
789 
790  // Verify SQLite ledgers are in the node store
791  {
792  auto const firstSeq{firstLedgerSeq(shardIndex)};
793  auto const lastSeq{
794  std::max(firstSeq, lastLedgerSeq(shardIndex))};
795  auto const numLedgers{
796  shardIndex == earliestShardIndex() ? lastSeq - firstSeq + 1
797  : ledgersPerShard_};
798  auto ledgerHashes{getHashesByIndex(firstSeq, lastSeq, app_)};
799  if (ledgerHashes.size() != numLedgers)
800  continue;
801 
802  bool valid{true};
803  for (std::uint32_t n = firstSeq; n <= lastSeq; n += 256)
804  {
805  if (!source.fetch(ledgerHashes[n].first, n))
806  {
807  JLOG(j_.warn()) << "SQLite ledger sequence " << n
808  << " mismatches node store";
809  valid = false;
810  break;
811  }
812  }
813  if (!valid)
814  continue;
815  }
816 
817  auto const path =
818  needsHistoricalPath ? chooseHistoricalPath(lock) : dir_;
819 
820  // Create the new shard
821  auto shard =
822  std::make_unique<Shard>(app_, *this, shardIndex, path, j_);
823 
824  if (!shard->open(scheduler_, *ctx_))
825  continue;
826 
827  // Create a marker file to signify an import in progress
828  auto const shardDir{path / std::to_string(shardIndex)};
829  auto const markerFile{shardDir / importMarker_};
830  {
831  std::ofstream ofs{markerFile.string()};
832  if (!ofs.is_open())
833  {
834  JLOG(j_.error()) << "shard " << shardIndex
835  << " failed to create temp marker file";
836  shard->removeOnDestroy();
837  continue;
838  }
839  ofs.close();
840  }
841 
842  // Copy the ledgers from node store
843  std::shared_ptr<Ledger> recentStored;
844  boost::optional<uint256> lastLedgerHash;
845 
846  while (auto seq = shard->prepare())
847  {
848  auto ledger{loadByIndex(*seq, app_, false)};
849  if (!ledger || ledger->info().seq != seq)
850  break;
851 
852  if (!Database::storeLedger(
853  *ledger,
854  shard->getBackend(),
855  nullptr,
856  nullptr,
857  recentStored))
858  {
859  break;
860  }
861 
862  if (!shard->store(ledger))
863  break;
864 
865  if (!lastLedgerHash && seq == lastLedgerSeq(shardIndex))
866  lastLedgerHash = ledger->info().hash;
867 
868  recentStored = ledger;
869  }
870 
871  using namespace boost::filesystem;
872  if (lastLedgerHash && shard->isBackendComplete())
873  {
874  // Store shard final key
875  Serializer s;
876  s.add32(Shard::version);
877  s.add32(firstLedgerSeq(shardIndex));
878  s.add32(lastLedgerSeq(shardIndex));
879  s.addBitString(*lastLedgerHash);
880  auto nObj{NodeObject::createObject(
881  hotUNKNOWN, std::move(s.modData()), Shard::finalKey)};
882 
883  try
884  {
885  shard->getBackend()->store(nObj);
886 
887  // The import process is complete and the
888  // marker file is no longer required
889  remove_all(markerFile);
890 
891  JLOG(j_.debug()) << "shard " << shardIndex
892  << " was successfully imported";
893 
894  auto const result{shards_.emplace(
895  shardIndex,
896  ShardInfo(std::move(shard), ShardInfo::State::none))};
897  finalizeShard(
898  result.first->second, true, lock, boost::none);
899 
900  if (shardIndex < shardBoundaryIndex(lock))
901  ++numHistShards;
902  }
903  catch (std::exception const& e)
904  {
905  JLOG(j_.error()) << "exception " << e.what()
906  << " in function " << __func__;
907  shard->removeOnDestroy();
908  }
909  }
910  else
911  {
912  JLOG(j_.error())
913  << "shard " << shardIndex << " failed to import";
914  shard->removeOnDestroy();
915  }
916  }
917 
918  updateStatus(lock);
919  }
920 
921  setFileStats();
922 }
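// Editorial summary of import(): it derives the range of complete shards
// available in the SQLite ledger store, spot-checks every 256th sequence
// against the source node store, and copies ledgers shard by shard. A
// marker file flags an import in progress for crash recovery, and a
// final-key node object (version, first/last sequence, last ledger hash)
// is written before the shard is handed to finalizeShard().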
923 
924 std::int32_t
925 DatabaseShardImp::getWriteLoad() const
926 {
927  std::shared_ptr<Shard> shard;
928  {
929  std::lock_guard lock(mutex_);
930  assert(init_);
931 
932  if (auto const it{shards_.find(acquireIndex_)}; it != shards_.end())
933  shard = it->second.shard;
934  else
935  return 0;
936  }
937 
938  return shard->getBackend()->getWriteLoad();
939 }
940 
941 void
942 DatabaseShardImp::store(
943  NodeObjectType type,
944  Blob&& data,
945  uint256 const& hash,
946  std::uint32_t seq)
947 {
948  auto const shardIndex{seqToShardIndex(seq)};
949  std::shared_ptr<Shard> shard;
950  {
951  std::lock_guard lock(mutex_);
952  assert(init_);
953 
954  if (shardIndex != acquireIndex_)
955  {
956  JLOG(j_.trace())
957  << "shard " << shardIndex << " is not being acquired";
958  return;
959  }
960 
961  if (auto const it{shards_.find(shardIndex)}; it != shards_.end())
962  shard = it->second.shard;
963  else
964  {
965  JLOG(j_.error())
966  << "shard " << shardIndex << " is not being acquired";
967  return;
968  }
969  }
970 
971  auto [backend, pCache, nCache] = shard->getBackendAll();
972  auto nObj{NodeObject::createObject(type, std::move(data), hash)};
973 
974  pCache->canonicalize_replace_cache(hash, nObj);
975  backend->store(nObj);
976  nCache->erase(hash);
977 
978  storeStats(nObj->getData().size());
979 }
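// Editorial note: store() only writes into the shard being acquired. The
// object is canonicalized into the positive cache so readers share one
// instance, persisted to the backend, and erased from the negative
// (not-found) cache in case it was previously missed.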
980 
981 std::shared_ptr<NodeObject>
982 DatabaseShardImp::fetch(uint256 const& hash, std::uint32_t seq)
983 {
984  auto cache{getCache(seq)};
985  if (cache.first)
986  return doFetch(hash, seq, *cache.first, *cache.second, false);
987  return {};
988 }
989 
990 bool
991 DatabaseShardImp::asyncFetch(
992  uint256 const& hash,
993  std::uint32_t seq,
994  std::shared_ptr<NodeObject>& object)
995 {
996  auto cache{getCache(seq)};
997  if (cache.first)
998  {
999  // See if the object is in cache
1000  object = cache.first->fetch(hash);
1001  if (object || cache.second->touch_if_exists(hash))
1002  return true;
1003  // Otherwise post a read
1004  Database::asyncFetch(hash, seq, cache.first, cache.second);
1005  }
1006  return false;
1007 }
1008 
1009 bool
1010 DatabaseShardImp::storeLedger(std::shared_ptr<Ledger const> const& srcLedger)
1011 {
1012  auto const seq{srcLedger->info().seq};
1013  auto const shardIndex{seqToShardIndex(seq)};
1014  std::shared_ptr<Shard> shard;
1015  {
1016  std::lock_guard lock(mutex_);
1017  assert(init_);
1018 
1019  if (shardIndex != acquireIndex_)
1020  {
1021  JLOG(j_.trace())
1022  << "shard " << shardIndex << " is not being acquired";
1023  return false;
1024  }
1025 
1026  if (auto const it{shards_.find(shardIndex)}; it != shards_.end())
1027  shard = it->second.shard;
1028  else
1029  {
1030  JLOG(j_.error())
1031  << "shard " << shardIndex << " is not being acquired";
1032  return false;
1033  }
1034  }
1035 
1036  if (shard->containsLedger(seq))
1037  {
1038  JLOG(j_.trace()) << "shard " << shardIndex << " ledger already stored";
1039  return false;
1040  }
1041 
1042  {
1043  auto [backend, pCache, nCache] = shard->getBackendAll();
1044  if (!Database::storeLedger(
1045  *srcLedger, backend, pCache, nCache, nullptr))
1046  {
1047  return false;
1048  }
1049  }
1050 
1051  return storeLedgerInShard(shard, srcLedger);
1052 }
1053 
1054 int
1055 DatabaseShardImp::getDesiredAsyncReadCount(std::uint32_t seq)
1056 {
1057  auto const shardIndex{seqToShardIndex(seq)};
1058  std::shared_ptr<Shard> shard;
1059  {
1060  std::lock_guard lock(mutex_);
1061  assert(init_);
1062 
1063  if (auto const it{shards_.find(shardIndex)}; it != shards_.end() &&
1064  (it->second.state == ShardInfo::State::final ||
1065  it->second.state == ShardInfo::State::acquire))
1066  {
1067  shard = it->second.shard;
1068  }
1069  else
1070  return 0;
1071  }
1072 
1073  return shard->pCache()->getTargetSize() / asyncDivider;
1074 }
1075 
1076 float
1077 DatabaseShardImp::getCacheHitRate()
1078 {
1079  std::shared_ptr<Shard> shard;
1080  {
1081  std::lock_guard lock(mutex_);
1082  assert(init_);
1083 
1084  if (auto const it{shards_.find(acquireIndex_)}; it != shards_.end())
1085  shard = it->second.shard;
1086  else
1087  return 0;
1088  }
1089 
1090  return shard->pCache()->getHitRate();
1091 }
1092 
1093 void
1094 DatabaseShardImp::sweep()
1095 {
1096  std::vector<std::weak_ptr<Shard>> shards;
1097  {
1098  std::lock_guard lock(mutex_);
1099  assert(init_);
1100 
1101  for (auto const& e : shards_)
1102  if (e.second.state == ShardInfo::State::final ||
1103  e.second.state == ShardInfo::State::acquire)
1104  {
1105  shards.push_back(e.second.shard);
1106  }
1107  }
1108 
1109  for (auto const& e : shards)
1110  {
1111  if (auto shard{e.lock()}; shard)
1112  shard->sweep();
1113  }
1114 }
1115 
1116 bool
1117 DatabaseShardImp::initConfig(std::lock_guard<std::mutex>&)
1118 {
1119  auto fail = [j = j_](std::string const& msg) {
1120  JLOG(j.error()) << "[" << ConfigSection::shardDatabase() << "] " << msg;
1121  return false;
1122  };
1123 
1124  Config const& config{app_.config()};
1125  Section const& section{config.section(ConfigSection::shardDatabase())};
1126 
1127  {
1128  // The earliest ledger sequence defaults to XRP_LEDGER_EARLIEST_SEQ.
1129  // A custom earliest ledger sequence can be set through the
1130  // configuration file using the 'earliest_seq' field under the
1131  // 'node_db' and 'shard_db' stanzas. If specified, this field must
1132  // have a value greater than zero and be equally assigned in
1133  // both stanzas.
1134 
1135  std::uint32_t shardDBEarliestSeq{0};
1136  get_if_exists<std::uint32_t>(
1137  section, "earliest_seq", shardDBEarliestSeq);
1138 
1139  std::uint32_t nodeDBEarliestSeq{0};
1140  get_if_exists<std::uint32_t>(
1141  config.section(ConfigSection::nodeDatabase()),
1142  "earliest_seq",
1143  nodeDBEarliestSeq);
1144 
1145  if (shardDBEarliestSeq != nodeDBEarliestSeq)
1146  {
1147  return fail(
1148  "and [" + ConfigSection::nodeDatabase() +
1149  "] define different 'earliest_seq' values");
1150  }
1151  }
1152 
1153  using namespace boost::filesystem;
1154  if (!get_if_exists<path>(section, "path", dir_))
1155  return fail("'path' missing");
1156 
1157  {
1158  get_if_exists(section, "max_historical_shards", maxHistoricalShards_);
1159 
1160  Section const& historicalShardPaths =
1161  config.section(SECTION_HISTORICAL_SHARD_PATHS);
1162 
1163  auto values = historicalShardPaths.values();
1164 
1165  std::sort(values.begin(), values.end());
1166  values.erase(std::unique(values.begin(), values.end()), values.end());
1167 
1168  for (auto const& s : values)
1169  {
1170  auto const dir = path(s);
1171  if (dir_ == dir)
1172  {
1173  return fail(
1174  "the 'path' cannot also be in the "
1175  "'historical_shard_path' section");
1176  }
1177 
1178  historicalPaths_.push_back(dir);
1179  }
1180  }
1181 
1182  if (section.exists("ledgers_per_shard"))
1183  {
1184  // To be set only in standalone for testing
1185  if (!config.standalone())
1186  return fail("'ledgers_per_shard' only honored in stand alone");
1187 
1188  ledgersPerShard_ = get<std::uint32_t>(section, "ledgers_per_shard");
1189  if (ledgersPerShard_ == 0 || ledgersPerShard_ % 256 != 0)
1190  return fail("'ledgers_per_shard' must be a multiple of 256");
1191 
1191
1192  earliestShardIndex_ = seqToShardIndex(earliestLedgerSeq());
1193  avgShardFileSz_ = ledgersPerShard_ * kilobytes(192);
1194  }
1195 
1196  // NuDB is the default and only supported permanent storage backend
1197  backendName_ = get<std::string>(section, "type", "nudb");
1198  if (!boost::iequals(backendName_, "NuDB"))
1199  return fail("'type' value unsupported");
1200 
1201  return true;
1202 }
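// Illustrative configuration for the settings read above (a sketch; the
// stanza names come from ConfigSections, the paths are invented):
//
//     [shard_db]
//     type=NuDB
//     path=/var/lib/rippled/db/shards/nudb
//     max_historical_shards=50
//
//     [historical_shard_paths]
//     /mnt/disk1/shards
//     /mnt/disk2/shards
//
// If 'earliest_seq' is used it must carry the same value in [shard_db]
// and [node_db]; 'ledgers_per_shard' is honored only in standalone mode
// and must be a multiple of 256.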
1203 
1204 std::shared_ptr<NodeObject>
1205 DatabaseShardImp::fetchFrom(uint256 const& hash, std::uint32_t seq)
1206 {
1207  auto const shardIndex{seqToShardIndex(seq)};
1208  std::shared_ptr<Shard> shard;
1209  {
1210  std::lock_guard lock(mutex_);
1211  assert(init_);
1212 
1213  if (auto const it{shards_.find(shardIndex)};
1214  it != shards_.end() && it->second.shard)
1215  {
1216  shard = it->second.shard;
1217  }
1218  else
1219  return {};
1220  }
1221 
1222  return fetchInternal(hash, shard->getBackend());
1223 }
1224 
1225 boost::optional<std::uint32_t>
1226 DatabaseShardImp::findAcquireIndex(
1227  std::uint32_t validLedgerSeq,
1228  std::lock_guard<std::mutex> const&)
1229 {
1230  if (validLedgerSeq < earliestLedgerSeq())
1231  return boost::none;
1232 
1233  auto const maxShardIndex{[this, validLedgerSeq]() {
1234  auto shardIndex{seqToShardIndex(validLedgerSeq)};
1235  if (validLedgerSeq != lastLedgerSeq(shardIndex))
1236  --shardIndex;
1237  return shardIndex;
1238  }()};
1239  auto const maxNumShards{maxShardIndex - earliestShardIndex() + 1};
1240 
1241  // Check if the shard store has all shards
1242  if (shards_.size() >= maxNumShards)
1243  return boost::none;
1244 
1245  if (maxShardIndex < 1024 ||
1246  static_cast<float>(shards_.size()) / maxNumShards > 0.5f)
1247  {
1248  // Small or mostly full index space to sample
1249  // Find the available indexes and select one at random
1250  std::vector<std::uint32_t> available;
1251  available.reserve(maxNumShards - shards_.size());
1252 
1253  for (auto shardIndex = earliestShardIndex();
1254  shardIndex <= maxShardIndex;
1255  ++shardIndex)
1256  {
1257  if (shards_.find(shardIndex) == shards_.end())
1258  available.push_back(shardIndex);
1259  }
1260 
1261  if (available.empty())
1262  return boost::none;
1263 
1264  if (available.size() == 1)
1265  return available.front();
1266 
1267  return available[rand_int(
1268  0u, static_cast<std::uint32_t>(available.size() - 1))];
1269  }
1270 
1271  // Large, sparse index space to sample
1272  // Keep choosing indexes at random until an available one is found
1273  // chances of running more than 30 times is less than 1 in a billion
1274  for (int i = 0; i < 40; ++i)
1275  {
1276  auto const shardIndex{rand_int(earliestShardIndex(), maxShardIndex)};
1277  if (shards_.find(shardIndex) == shards_.end())
1278  return shardIndex;
1279  }
1280 
1281  assert(false);
1282  return boost::none;
1283 }
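// Editorial note on the bound above: the random-probe loop only runs when
// occupancy is at most one half, so each draw hits an occupied index with
// probability <= 0.5, and 30 consecutive misses occur with probability
// <= 0.5^30, roughly 9.3e-10; the 40 iterations used here add margin.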
1284 
1285 void
1286 DatabaseShardImp::finalizeShard(
1287  ShardInfo& shardInfo,
1288  bool writeSQLite,
1289  std::lock_guard<std::mutex>&,
1290  boost::optional<uint256> const& expectedHash)
1291 {
1292  assert(shardInfo.shard);
1293  assert(shardInfo.shard->index() != acquireIndex_);
1294  assert(shardInfo.shard->isBackendComplete());
1295  assert(shardInfo.state != ShardInfo::State::finalize);
1296 
1297  auto const shardIndex{shardInfo.shard->index()};
1298 
1299  shardInfo.state = ShardInfo::State::finalize;
1300  taskQueue_->addTask([this, shardIndex, writeSQLite, expectedHash]() {
1301  if (isStopping())
1302  return;
1303 
1304  std::shared_ptr<Shard> shard;
1305  {
1306  std::lock_guard lock(mutex_);
1307  if (auto const it{shards_.find(shardIndex)}; it != shards_.end())
1308  {
1309  shard = it->second.shard;
1310  }
1311  else
1312  {
1313  JLOG(j_.error()) << "Unable to finalize shard " << shardIndex;
1314  return;
1315  }
1316  }
1317 
1318  if (!shard->finalize(writeSQLite, expectedHash))
1319  {
1320  if (isStopping())
1321  return;
1322 
1323  // Invalid or corrupt shard, remove it
1324  removeFailedShard(shard);
1325  return;
1326  }
1327 
1328  if (isStopping())
1329  return;
1330 
1331  {
1332  std::lock_guard lock(mutex_);
1333  auto const it{shards_.find(shardIndex)};
1334  if (it == shards_.end())
1335  return;
1336  it->second.state = ShardInfo::State::final;
1337  updateStatus(lock);
1338 
1339  auto const boundaryIndex = shardBoundaryIndex(lock);
1340  auto const isHistoricalShard = shardIndex < boundaryIndex;
1341 
1342  if (isHistoricalShard)
1343  {
1344  if (!historicalPaths_.empty() &&
1345  shard->getDir().parent_path() == dir_)
1346  {
1347  // This is a historical shard that wasn't
1348  // placed at a separate historical path
1349  JLOG(j_.warn()) << "shard " << shardIndex
1350  << " is not stored at a historical path";
1351  }
1352  }
1353 
1354  else
1355  {
1356  // Not a historical shard. Shift recent shards
1357  // if necessary
1358  relocateOutdatedShards(lock);
1359  assert(!boundaryIndex || shardIndex - boundaryIndex <= 1);
1360 
1361  auto& recentShard = shardIndex == boundaryIndex
1362  ? secondLatestShardIndex_
1363  : latestShardIndex_;
1364
1365  // Set the appropriate recent shard
1366  // index
1367  recentShard = shardIndex;
1368 
1369  if (shard->getDir().parent_path() != dir_)
1370  {
1371  JLOG(j_.warn()) << "shard " << shard->index()
1372  << " is not stored at the path";
1373  }
1374  }
1375  }
1376 
1377  setFileStats();
1378 
1379  // Update peers with new shard index
1380  if (!app_.config().standalone() &&
1380  app_.getOPs().getOperatingMode() !=
1381  OperatingMode::DISCONNECTED)
1382  {
1383  protocol::TMPeerShardInfo message;
1384  PublicKey const& publicKey{app_.nodeIdentity().first};
1385  message.set_nodepubkey(publicKey.data(), publicKey.size());
1386  message.set_shardindexes(std::to_string(shardIndex));
1387  app_.overlay().foreach(send_always(std::make_shared<Message>(
1388  message, protocol::mtPEER_SHARD_INFO)));
1389  }
1390  });
1391 }
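// Editorial note: finalizeShard() defers verification to taskQueue_. The
// task re-resolves the shard under mutex_, runs Shard::finalize() against
// the expected hash, promotes the state to final, updates the recent
// shard bookkeeping (relocateOutdatedShards), and advertises the new
// index to peers via TMPeerShardInfo unless running standalone.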
1392 
1393 void
1394 DatabaseShardImp::setFileStats()
1395 {
1396  std::vector<std::weak_ptr<Shard>> shards;
1397  {
1398  std::lock_guard lock(mutex_);
1399  assert(init_);
1400 
1401  if (shards_.empty())
1402  return;
1403 
1404  for (auto const& e : shards_)
1405  if (e.second.shard)
1406  shards.push_back(e.second.shard);
1407  }
1408 
1409  std::uint64_t sumSz{0};
1410  std::uint32_t sumFd{0};
1411  std::uint32_t numShards{0};
1412  for (auto const& e : shards)
1413  {
1414  if (auto shard{e.lock()}; shard)
1415  {
1416  auto [sz, fd] = shard->fileInfo();
1417  sumSz += sz;
1418  sumFd += fd;
1419  ++numShards;
1420  }
1421  }
1422 
1423  std::lock_guard lock(mutex_);
1424  fileSz_ = sumSz;
1425  fdRequired_ = sumFd;
1426  avgShardFileSz_ = (numShards == 0 ? fileSz_ : fileSz_ / numShards);
1427 
1428  if (auto const count = numHistoricalShards(lock);
1429  count >= maxHistoricalShards_)
1430  {
1431  JLOG(j_.warn()) << "maximum number of historical shards reached";
1432  canAdd_ = false;
1433  }
1434  else if (!sufficientStorage(
1435  maxHistoricalShards_ - count,
1436  PathDesignation::historical,
1437  lock))
1438  {
1439  JLOG(j_.warn())
1440  << "maximum shard store size exceeds available storage space";
1441  }
1442 }
1443 
1444 void
1445 DatabaseShardImp::updateStatus(std::lock_guard<std::mutex> const&)
1446 {
1447  if (!shards_.empty())
1448  {
1449  RangeSet<std::uint32_t> rs;
1450  for (auto const& e : shards_)
1451  if (e.second.state == ShardInfo::State::final)
1452  rs.insert(e.second.shard->index());
1453  status_ = to_string(rs);
1454  }
1455  else
1456  status_.clear();
1457 }
1458 
1459 std::pair<std::shared_ptr<PCache>, std::shared_ptr<NCache>>
1460 DatabaseShardImp::getCache(std::uint32_t seq)
1461 {
1462  auto const shardIndex{seqToShardIndex(seq)};
1463  std::shared_ptr<Shard> shard;
1464  {
1465  std::lock_guard lock(mutex_);
1466  assert(init_);
1467 
1468  if (auto const it{shards_.find(shardIndex)};
1469  it != shards_.end() && it->second.shard)
1470  {
1471  shard = it->second.shard;
1472  }
1473  else
1474  return {};
1475  }
1476 
1477  std::shared_ptr<PCache> pCache;
1478  std::shared_ptr<NCache> nCache;
1479  std::tie(std::ignore, pCache, nCache) = shard->getBackendAll();
1480 
1481  return std::make_pair(pCache, nCache);
1482 }
1483 
1484 bool
1485 DatabaseShardImp::sufficientStorage(
1486  std::uint32_t numShards,
1487  PathDesignation pathDesignation,
1488  std::lock_guard<std::mutex> const&) const
1489 {
1490  try
1491  {
1492  std::vector<std::uint64_t> capacities;
1493 
1494  if (pathDesignation == PathDesignation::historical &&
1495  !historicalPaths_.empty())
1496  {
1497  capacities.reserve(historicalPaths_.size());
1498 
1499  for (auto const& path : historicalPaths_)
1500  {
1501  // Get the available storage for each historical path
1502  auto const availableSpace =
1503  boost::filesystem::space(path).available;
1504 
1505  capacities.push_back(availableSpace);
1506  }
1507  }
1508  else
1509  {
1510  // Get the available storage for the main shard path
1511  capacities.push_back(boost::filesystem::space(dir_).available);
1512  }
1513 
1514  for (std::uint64_t const capacity : capacities)
1515  {
1516  // Leverage all the historical shard paths to
1517  // see if collectively they can fit the specified
1518  // number of shards. For this to work properly,
1519  // each historical path must correspond to a separate
1520  // physical device or filesystem.
1521 
1522  auto const shardCap = capacity / avgShardFileSz_;
1523  if (numShards <= shardCap)
1524  return true;
1525 
1526  numShards -= shardCap;
1527  }
1528  }
1529  catch (std::exception const& e)
1530  {
1531  JLOG(j_.error()) << "exception " << e.what() << " in function "
1532  << __func__;
1533  return false;
1534  }
1535 
1536  return false;
1537 }
1538 
1539 bool
1540 DatabaseShardImp::storeLedgerInShard(
1541  std::shared_ptr<Shard>& shard,
1542  std::shared_ptr<Ledger const> const& ledger)
1543 {
1544  bool result{true};
1545 
1546  if (!shard->store(ledger))
1547  {
1548  // Invalid or corrupt shard, remove it
1549  removeFailedShard(shard);
1550  result = false;
1551  }
1552  else if (shard->isBackendComplete())
1553  {
1554  std::lock_guard lock(mutex_);
1555 
1556  if (auto const it{shards_.find(shard->index())}; it != shards_.end())
1557  {
1558  if (shard->index() == acquireIndex_)
1559  acquireIndex_ = 0;
1560 
1561  if (it->second.state != ShardInfo::State::finalize)
1562  finalizeShard(it->second, false, lock, boost::none);
1563  }
1564  else
1565  {
1566  JLOG(j_.debug())
1567  << "shard " << shard->index() << " is no longer being acquired";
1568  }
1569  }
1570 
1571  setFileStats();
1572  return result;
1573 }
1574 
1575 void
1576 DatabaseShardImp::removeFailedShard(std::shared_ptr<Shard>& shard)
1577 {
1578  {
1579  std::lock_guard lock(mutex_);
1580 
1581  if (shard->index() == acquireIndex_)
1582  acquireIndex_ = 0;
1583 
1584  if (shard->index() == latestShardIndex_)
1585  latestShardIndex_ = boost::none;
1586 
1587  if (shard->index() == secondLatestShardIndex_)
1588  secondLatestShardIndex_ = boost::none;
1589 
1590  if ((shards_.erase(shard->index()) > 0) && shard->isFinal())
1591  updateStatus(lock);
1592  }
1593 
1594  shard->removeOnDestroy();
1595 
1596  // Reset the shared_ptr to invoke the shard's
1597  // destructor and remove it from the server
1598  shard.reset();
1599  setFileStats();
1600 }
1601 
1602 std::uint32_t
1603 DatabaseShardImp::shardBoundaryIndex(std::lock_guard<std::mutex> const&) const
1604 {
1605  auto const validIndex = app_.getLedgerMaster().getValidLedgerIndex();
1606 
1607  // Shards with an index earlier than recentShardBoundaryIndex
1608  // are considered historical. The three shards at or later than
1609  // this index consist of the two most recently validated shards
1610  // and the shard still in the process of being built by live
1611  // transactions.
1612  return NodeStore::seqToShardIndex(validIndex, ledgersPerShard_) - 1;
1613 }
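// Editorial example: seqToShardIndex(seq) is (seq - 1) / ledgersPerShard_,
// so with the default 16384 ledgers per shard a validated ledger at
// sequence 50000 lies in shard 3; the boundary above is then index 2, and
// only shards strictly below index 2 count as historical.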
1614 
1615 std::uint32_t
1616 DatabaseShardImp::numHistoricalShards(
1617  std::lock_guard<std::mutex> const& lock) const
1618 {
1619  auto const recentShardBoundaryIndex = shardBoundaryIndex(lock);
1620 
1621  return std::count_if(
1622  shards_.begin(),
1623  shards_.end(),
1624  [recentShardBoundaryIndex](auto const& entry) {
1625  return entry.first < recentShardBoundaryIndex;
1626  });
1627 }
1628 
1629 void
1630 DatabaseShardImp::relocateOutdatedShards(
1631  std::lock_guard<std::mutex> const& lock)
1632 {
1633  if (auto& cur = latestShardIndex_, &prev = secondLatestShardIndex_;
1634  cur || prev)
1635  {
1636  auto const latestShardIndex = NodeStore::seqToShardIndex(
1637  app_.getLedgerMaster().getValidLedgerIndex(), ledgersPerShard_);
1638
1639  auto const separateHistoricalPath = !historicalPaths_.empty();
1640 
1641  auto const removeShard =
1642  [this](std::uint32_t const shardIndex) -> void {
1643  canAdd_ = false;
1644 
1645  if (auto it = shards_.find(shardIndex); it != shards_.end())
1646  {
1647  if (it->second.shard)
1648  removeFailedShard(it->second.shard);
1649  else
1650  {
1651  JLOG(j_.warn()) << "can't find shard to remove";
1652  }
1653  }
1654  else
1655  {
1656  JLOG(j_.warn()) << "can't find shard to remove";
1657  }
1658  };
1659 
1660  auto const keepShard =
1661  [this, &lock, removeShard, separateHistoricalPath](
1662  std::uint32_t const shardIndex) -> bool {
1663  if (numHistoricalShards(lock) >= maxHistoricalShards_)
1664  {
1665  JLOG(j_.error())
1666  << "maximum number of historical shards reached";
1667 
1668  removeShard(shardIndex);
1669  return false;
1670  }
1671  if (separateHistoricalPath &&
1672  !sufficientStorage(1, PathDesignation::historical, lock))
1673  {
1674  JLOG(j_.error()) << "insufficient storage space available";
1675 
1676  removeShard(shardIndex);
1677  return false;
1678  }
1679 
1680  return true;
1681  };
1682 
1683  // Move a shard from the main shard path to a historical shard
1684  // path by copying the contents, and creating a new shard.
1685  auto const moveShard = [this,
1686  &lock](std::uint32_t const shardIndex) -> void {
1687  auto const dst = chooseHistoricalPath(lock);
1688 
1689  if (auto it = shards_.find(shardIndex); it != shards_.end())
1690  {
1691  if (auto& shard = it->second.shard)
1692  {
1693  // Close any open file descriptors before moving
1694  // the shard dir. Don't call removeOnDestroy since
1695  // that would attempt to close the fds after the
1696  // directory has been moved.
1697  shard->closeAll();
1698 
1699  try
1700  {
1701  // Move the shard directory to the new path
1702  boost::filesystem::rename(
1703  shard->getDir().string(),
1704  dst / std::to_string(shardIndex));
1705  }
1706  catch (...)
1707  {
1708  JLOG(j_.error())
1709  << "shard " << shardIndex
1710  << " failed to move to historical storage";
1711 
1712  return;
1713  }
1714 
1715  // Create a shard instance at the new location
1716  shard = std::make_unique<Shard>(
1717  app_, *this, shardIndex, dst, j_);
1718 
1719  // Open the new shard
1720  if (!shard->open(scheduler_, *ctx_))
1721  {
1722  JLOG(j_.error())
1723  << "shard " << shardIndex
1724  << " failed to open in historical storage";
1725 
1726  shard->removeOnDestroy();
1727  shard.reset();
1728  }
1729  }
1730  else
1731  {
1732  JLOG(j_.warn())
1733  << "can't find shard to move to historical path";
1734  }
1735  }
1736  else
1737  {
1738  JLOG(j_.warn())
1739  << "can't find shard to move to historical path";
1740  }
1741  };
1742 
1743  // See if either of the recent shards
1744  // needs to be updated
1745  bool const curNotSynched =
1746  latestShardIndex_ && *latestShardIndex_ != latestShardIndex;
1747  bool const prevNotSynched = secondLatestShardIndex_ &&
1748  *secondLatestShardIndex_ != latestShardIndex - 1;
1749 
1750  // A new shard has been published. Move outdated shards
1751  // to historical storage as needed
1752  if (curNotSynched || prevNotSynched)
1753  {
1754  if (prev)
1755  {
1756  // Move the formerly second latest shard to
1757  // historical storage
1758  if (keepShard(*prev) && separateHistoricalPath)
1759  {
1760  moveShard(*prev);
1761  }
1762 
1763  prev = boost::none;
1764  }
1765 
1766  if (cur)
1767  {
1768  // The formerly latest shard is now
1769  // the second latest
1770  if (cur == latestShardIndex - 1)
1771  {
1772  prev = cur;
1773  }
1774 
1775  // The formerly latest shard is no
1776  // longer a 'recent' shard
1777  else
1778  {
1779  // Move the formerly latest shard to
1780  // historical storage
1781  if (keepShard(*cur) && separateHistoricalPath)
1782  {
1783  moveShard(*cur);
1784  }
1785  }
1786 
1787  cur = boost::none;
1788  }
1789  }
1790  }
1791 }
1792 
1793 auto
1794 DatabaseShardImp::prepareForNewShard(
1795  std::uint32_t shardIndex,
1796  std::uint32_t numHistoricalShards,
1797  std::lock_guard<std::mutex> const& lock) -> boost::optional<PathDesignation>
1798 {
1799  // Any shard earlier than the two most recent shards
1800  // is a historical shard
1801  auto const boundaryIndex = shardBoundaryIndex(lock);
1802  auto const isHistoricalShard = shardIndex < boundaryIndex;
1803 
1804  auto const designation = isHistoricalShard && !historicalPaths_.empty()
1805  ? PathDesignation::historical
1806  : PathDesignation::none;
1807
1808  // Check shard count and available storage space
1809  if (isHistoricalShard && numHistoricalShards >= maxHistoricalShards_)
1810  {
1811  JLOG(j_.error()) << "maximum number of historical shards reached";
1812  canAdd_ = false;
1813  return boost::none;
1814  }
1815  if (!sufficientStorage(1, designation, lock))
1816  {
1817  JLOG(j_.error()) << "insufficient storage space available";
1818  canAdd_ = false;
1819  return boost::none;
1820  }
1821 
1822  return designation;
1823 }
1824 
1825 boost::filesystem::path
1826 DatabaseShardImp::chooseHistoricalPath(std::lock_guard<std::mutex> const&) const
1827 {
1828  // If not configured with separate historical paths,
1829  // use the main path (dir_) by default.
1830  if (historicalPaths_.empty())
1831  return dir_;
1832 
1833  boost::filesystem::path historicalShardPath;
1834  std::vector<boost::filesystem::path> potentialPaths;
1835 
1836  for (boost::filesystem::path const& path : historicalPaths_)
1837  {
1838  if (boost::filesystem::space(path).available >= avgShardFileSz_)
1839  potentialPaths.push_back(path);
1840  }
1841 
1842  if (potentialPaths.empty())
1843  {
1844  JLOG(j_.error()) << "failed to select a historical shard path";
1845  return "";
1846  }
1847 
1848  std::sample(
1849  potentialPaths.begin(),
1850  potentialPaths.end(),
1851  &historicalShardPath,
1852  1,
1853  default_prng());
1854 
1855  return historicalShardPath;
1856 }
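// Editorial note: chooseHistoricalPath() keeps only the configured paths
// with at least one average shard of free space and selects uniformly at
// random among them (std::sample with n = 1), spreading historical shards
// across devices instead of filling one disk first.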
1857 
1858 bool
1859 DatabaseShardImp::checkHistoricalPaths() const
1860 {
1861 #if BOOST_OS_LINUX
1862  // Each historical shard path must correspond
1863  // to a directory on a distinct device or filesystem.
1864  // Currently, this constraint is enforced only on
1865  // Linux.
1866 
1867  std::unordered_map<decltype(statvfs::f_fsid), std::vector<std::string>>
1868  filesystemIDs;
1869
1870  for (auto const& path : historicalPaths_)
1871  {
1872  struct statvfs buffer;
1873  if (statvfs(path.c_str(), &buffer))
1874  {
1875  JLOG(j_.error())
1876  << "failed to acquire stats for 'historical_shard_path': "
1877  << path;
1878  return false;
1879  }
1880 
1881  filesystemIDs[buffer.f_fsid].push_back(path.string());
1882  }
1883 
1884  bool ret = true;
1885  for (auto const& entry : filesystemIDs)
1886  {
1887  // Check to see if any of the paths
1888  // are stored on the same filesystem
1889  if (entry.second.size() > 1)
1890  {
1891  // Two or more historical storage paths
1892  // correspond to the same filesystem.
1893  JLOG(j_.error())
1894  << "The following paths correspond to the same filesystem: "
1895  << boost::algorithm::join(entry.second, ", ")
1896  << ". Each configured historical storage path should"
1897  " be on a unique device or filesystem.";
1898 
1899  ret = false;
1900  }
1901  }
1902 
1903  return ret;
1904 
1905 #else
1906  // The requirement that each historical storage path
1907  // corresponds to a distinct device or filesystem is
1908  // enforced only on Linux, so on other platforms
1909  // keep track of the available capacities for each
1910  // path. Issue a warning if we suspect any of the paths
1911  // may violate this requirement.
1912 
1913  // Map byte counts to each path that
1914  // shares that byte count.
1915  std::unordered_map<std::uintmax_t, std::vector<std::string>>
1916  uniqueCapacities(historicalPaths_.size());
1917 
1918  for (auto const& path : historicalPaths_)
1919  uniqueCapacities[boost::filesystem::space(path).available].push_back(
1920  path.string());
1921 
1922  for (auto const& entry : uniqueCapacities)
1923  {
1924  // Check to see if any paths have the
1925  // same amount of available bytes.
1926  if (entry.second.size() > 1)
1927  {
1928  // Two or more historical storage paths may
1929  // correspond to the same device or
1930  // filesystem.
1931  JLOG(j_.warn())
1932  << "Each of the following paths have " << entry.first
1933  << " bytes free, and may be located on the same device"
1934  " or filesystem: "
1935  << boost::algorithm::join(entry.second, ", ")
1936  << ". Each configured historical storage path should"
1937  " be on a unique device or filesystem.";
1938  }
1939  }
1940 #endif
1941 
1942  return true;
1943 }
1944 
1945 //------------------------------------------------------------------------------
1946 
1947 std::unique_ptr<DatabaseShard>
1948 make_ShardStore(
1949  Application& app,
1950  Stoppable& parent,
1951  Scheduler& scheduler,
1952  int readThreads,
1953  beast::Journal j)
1954 {
1955  // The shard store is optional. Future changes will require it.
1956  Section const& section{
1957  app.config().section(ConfigSection::shardDatabase())};
1958  if (section.empty())
1959  return nullptr;
1960 
1961  return std::make_unique<DatabaseShardImp>(
1962  app, parent, "ShardStore", scheduler, readThreads, j);
1963 }
1964 
1965 } // namespace NodeStore
1966 } // namespace ripple
ripple::NodeStore::DatabaseShardImp::ShardInfo::State
State
Definition: DatabaseShardImp.h:178
ripple::NodeStore::make_ShardStore
std::unique_ptr< DatabaseShard > make_ShardStore(Application &app, Stoppable &parent, Scheduler &scheduler, int readThreads, beast::Journal j)
Definition: DatabaseShardImp.cpp:1948
ripple::Section
Holds a collection of configuration values.
Definition: BasicConfig.h:43
ripple::loadLedgerHelper
std::tuple< std::shared_ptr< Ledger >, std::uint32_t, uint256 > loadLedgerHelper(std::string const &sqlSuffix, Application &app, bool acquire)
Definition: Ledger.cpp:1133
ripple::Application
Definition: Application.h:97
ripple::NodeStore::DatabaseShardImp::earliestShardIndex_
std::uint32_t earliestShardIndex_
Definition: DatabaseShardImp.h:245
ripple::NodeStore::DatabaseShardImp::ledgersPerShard_
std::uint32_t ledgersPerShard_
Definition: DatabaseShardImp.h:242
ripple::NodeStore::DatabaseShardImp::finalizeShard
void finalizeShard(ShardInfo &shardInfo, bool writeSQLite, std::lock_guard< std::mutex > &, boost::optional< uint256 > const &expectedHash)
Definition: DatabaseShardImp.cpp:1286
ripple::hotUNKNOWN
@ hotUNKNOWN
Definition: NodeObject.h:33
ripple::NodeStore::DatabaseShardImp::lastLedgerSeq
std::uint32_t lastLedgerSeq(std::uint32_t shardIndex) const override
Calculates the last ledger sequence for a given shard index.
Definition: DatabaseShardImp.h:113
ripple::NodeStore::DatabaseShardImp::earliestShardIndex
std::uint32_t earliestShardIndex() const override
Definition: DatabaseShardImp.h:91
ripple::makeSlice
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:240
ripple::NodeStore::DatabaseShardImp::mutex_
std::mutex mutex_
Definition: DatabaseShardImp.h:203
ripple::NodeStore::DatabaseShardImp::prepareLedger
boost::optional< std::uint32_t > prepareLedger(std::uint32_t validLedgerSeq) override
Prepare to store a new ledger in the shard being acquired.
Definition: DatabaseShardImp.cpp:238
ripple::NodeStore::DatabaseShardImp::app_
Application & app_
Definition: DatabaseShardImp.h:201
ripple::NodeStore::DatabaseShardImp::storeLedger
bool storeLedger(std::shared_ptr< Ledger const > const &srcLedger) override
Copies a ledger stored in a different database to this one.
Definition: DatabaseShardImp.cpp:1010
ripple::NodeStore::Database
Persistency layer for NodeObject.
Definition: Database.h:53
std::string
STL class.
ripple::NodeStore::DatabaseShardImp::fetch
std::shared_ptr< NodeObject > fetch(uint256 const &hash, std::uint32_t seq) override
Fetch an object.
Definition: DatabaseShardImp.cpp:982
std::shared_ptr< Ledger >
ripple::NodeStore::Database::doFetch
std::shared_ptr< NodeObject > doFetch(uint256 const &hash, std::uint32_t seq, TaggedCache< uint256, NodeObject > &pCache, KeyCache< uint256 > &nCache, bool isAsync)
Definition: Database.cpp:184
ripple::loadByIndex
std::shared_ptr< Ledger > loadByIndex(std::uint32_t ledgerIndex, Application &app, bool acquire)
Definition: Ledger.cpp:1225
ripple::NodeStore::DatabaseShardImp::seqToShardIndex
std::uint32_t seqToShardIndex(std::uint32_t seq) const override
Calculates the shard index for a given ledger sequence.
Definition: DatabaseShardImp.h:97
std::exception
STL class.
std::stoul
T stoul(T... args)
ripple::NodeStore::DatabaseShardImp::PathDesignation
PathDesignation
Definition: DatabaseShardImp.h:196
beast::Journal::trace
Stream trace() const
Severity stream access functions.
Definition: Journal.h:309
ripple::NodeStore::DatabaseShardImp::ShardInfo::State::none
@ none
ripple::Family::reset
virtual void reset()=0
ripple::Serializer::modData
Blob & modData()
Definition: Serializer.h:176
std::pair
std::vector::reserve
T reserve(T... args)
ripple::NodeStore::DatabaseShardImp::getCache
std::pair< std::shared_ptr< PCache >, std::shared_ptr< NCache > > getCache(std::uint32_t seq)
Definition: DatabaseShardImp.cpp:1460
ripple::NodeStore::DatabaseShardImp::removePreShard
void removePreShard(std::uint32_t shardIndex) override
Remove a previously prepared shard index for import.
Definition: DatabaseShardImp.cpp:366
ripple::LedgerMaster::getValidLedgerIndex
LedgerIndex getValidLedgerIndex()
Definition: LedgerMaster.cpp:212
ripple::NodeStore::Database::fdRequired_
int fdRequired_
Definition: Database.h:244
ripple::NodeStore::DatabaseShardImp::fileSz_
std::uint64_t fileSz_
Definition: DatabaseShardImp.h:237
ripple::InboundLedger::Reason::GENERIC
@ GENERIC
std::vector
STL class.
ripple::ConfigSection::shardDatabase
static std::string shardDatabase()
Definition: ConfigSections.h:38
std::vector::size
T size(T... args)
ripple::NodeObjectType
NodeObjectType
The types of node objects.
Definition: NodeObject.h:32
ripple::NodeObject::createObject
static std::shared_ptr< NodeObject > createObject(NodeObjectType type, Blob &&data, uint256 const &hash)
Create an object from fields.
Definition: NodeObject.cpp:37
ripple::NodeStore::DatabaseShardImp::taskQueue_
std::unique_ptr< TaskQueue > taskQueue_
Definition: DatabaseShardImp.h:210
ripple::NodeStore::DatabaseShardImp::ShardInfo::state
State state
Definition: DatabaseShardImp.h:193
ripple::NodeStore::DatabaseShardImp::setStored
void setStored(std::shared_ptr< Ledger const > const &ledger) override
Notifies the database that the given ledger has been fully acquired and stored.
Definition: DatabaseShardImp.cpp:590
ripple::NodeStore::DatabaseShardImp::secondLatestShardIndex_
boost::optional< std::uint32_t > secondLatestShardIndex_
Definition: DatabaseShardImp.h:261
ripple::NodeStore::DatabaseShardImp::ShardInfo::State::acquire
@ acquire
beast::Journal::warn
Stream warn() const
Definition: Journal.h:327
ripple::NodeStore::DatabaseShardImp::prepareShard
bool prepareShard(std::uint32_t shardIndex) override
Prepare a shard index to be imported into the database.
Definition: DatabaseShardImp.cpp:306
std::lock_guard
STL class.
ripple::kilobytes
constexpr auto kilobytes(T value) noexcept
Definition: ByteUtilities.h:27
ripple::NetworkOPs::getOperatingMode
virtual OperatingMode getOperatingMode() const =0
ripple::LedgerMaster::walkHashBySeq
boost::optional< LedgerHash > walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason)
Walk to a ledger's hash using the skip list.
Definition: LedgerMaster.cpp:1603
ripple::NodeStore::DatabaseShardImp::ShardInfo::State::final
@ final
ripple::NodeStore::DatabaseShardImp::asyncFetch
bool asyncFetch(uint256 const &hash, std::uint32_t seq, std::shared_ptr< NodeObject > &object) override
Fetch an object without waiting.
Definition: DatabaseShardImp.cpp:991
std::all_of
T all_of(T... args)
ripple::to_string
std::string to_string(ListDisposition disposition)
Definition: ValidatorList.cpp:42
ripple::NodeStore::Shard::finalKey
static const uint256 finalKey
Definition: Shard.h:168
std::vector::front
T front(T... args)
std::sort
T sort(T... args)
std::shared_ptr::reset
T reset(T... args)
ripple::SHAMapHash
Definition: SHAMapTreeNode.h:43
ripple::Application::getOPs
virtual NetworkOPs & getOPs()=0
ripple::NodeStore::DatabaseShardImp::sweep
void sweep() override
Remove expired entries from the positive and negative caches.
Definition: DatabaseShardImp.cpp:1094
ripple::Section::values
std::vector< std::string > const & values() const
Returns all the values in the section.
Definition: BasicConfig.h:76
std::string::clear
T clear(T... args)
ripple::NodeStore::DatabaseShardImp::ShardInfo::State::finalize
@ finalize
ripple::send_always
Sends a message to all peers.
Definition: predicates.h:31
ripple::Stoppable::setParent
void setParent(Stoppable &parent)
Set the parent of this Stoppable.
Definition: Stoppable.cpp:43
ripple::NodeStore::DatabaseShardImp::PathDesignation::historical
@ historical
ripple::get_if_exists
bool get_if_exists(Section const &section, std::string const &name, T &v)
Definition: BasicConfig.h:347
ripple::deserializePrefixedHeader
LedgerInfo deserializePrefixedHeader(Slice data)
Deserialize a ledger header (prefixed with 4 bytes) from a byte array.
Definition: InboundLedger.cpp:292
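A short sketch of the decode; `blob` stands in for a serialized header fetched from a backend:
// The slice must begin with the 4-byte hash prefix; the header follows.
Slice const data{blob.data(), blob.size()};
LedgerInfo const info = deserializePrefixedHeader(data);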
ripple::NodeStore::Shard::version
static constexpr std::uint32_t version
Definition: Shard.h:163
ripple::NodeStore::DatabaseShardImp::getDesiredAsyncReadCount
int getDesiredAsyncReadCount(std::uint32_t seq) override
Get the maximum number of async reads the node store prefers.
Definition: DatabaseShardImp.cpp:1055
ripple::NodeStore::DatabaseShardImp::firstLedgerSeq
std::uint32_t firstLedgerSeq(std::uint32_t shardIndex) const override
Calculates the first ledger sequence for a given shard index.
Definition: DatabaseShardImp.h:104
ripple::NodeStore::DatabaseShardImp::getCacheHitRate
float getCacheHitRate() override
Get the ratio of positive cache hits to total fetch attempts.
Definition: DatabaseShardImp.cpp:1077
ripple::NodeStore::DatabaseShardImp::avgShardFileSz_
std::uint64_t avgShardFileSz_
Definition: DatabaseShardImp.h:248
ripple::base_uint< 256 >
ripple::NodeStore::DatabaseShardImp::status_
std::string status_
Definition: DatabaseShardImp.h:225
ripple::NodeStore::DatabaseShardImp::fetchFrom
std::shared_ptr< NodeObject > fetchFrom(uint256 const &hash, std::uint32_t seq) override
Definition: DatabaseShardImp.cpp:1205
ripple::NodeStore::DatabaseShardImp::getPreShards
std::string getPreShards() override
Get shard indexes being imported.
Definition: DatabaseShardImp.cpp:379
ripple::NodeStore::DatabaseShardImp::getWriteLoad
std::int32_t getWriteLoad() const override
Retrieve the estimated number of pending write operations.
Definition: DatabaseShardImp.cpp:925
ripple::NodeStore::DatabaseShardImp::shardBoundaryIndex
std::uint32_t shardBoundaryIndex(std::lock_guard< std::mutex > const &) const
Definition: DatabaseShardImp.cpp:1603
ripple::NodeStore::TaskQueue
Definition: TaskQueue.h:32
ripple::OperatingMode::DISCONNECTED
@ DISCONNECTED
Not ready to process requests.
ripple::Stoppable
Provides an interface for starting and stopping.
Definition: Stoppable.h:200
ripple::NodeStore::DatabaseShardImp::ShardInfo::shard
std::shared_ptr< Shard > shard
Definition: DatabaseShardImp.h:192
ripple::NodeStore::DatabaseShardImp::chooseHistoricalPath
boost::filesystem::path chooseHistoricalPath(std::lock_guard< std::mutex > const &) const
Definition: DatabaseShardImp.cpp:1826
ripple::rand_int
std::enable_if_t< std::is_integral< Integral >::value && detail::is_engine< Engine >::value, Integral > rand_int(Engine &engine, Integral min, Integral max)
Return a uniformly distributed random integer.
Definition: ripple/basics/random.h:115
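rand_int pairs with default_prng (listed below) when findAcquireIndex chooses among several eligible shard indexes; a sketch with example bounds:
// Uniform pick from the closed range [0, maxShardIndex]
auto const candidate =
    rand_int(default_prng(), std::uint32_t{0}, maxShardIndex);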
ripple::NodeStore::DatabaseShardImp::initConfig
bool initConfig(std::lock_guard< std::mutex > &)
Definition: DatabaseShardImp.cpp:1117
ripple::NodeStore::DatabaseShardImp::sufficientStorage
bool sufficientStorage(std::uint32_t numShards, PathDesignation pathDesignation, std::lock_guard< std::mutex > const &) const
Definition: DatabaseShardImp.cpp:1485
ripple::NodeStore::DatabaseShardImp::init_
bool init_
Definition: DatabaseShardImp.h:204
ripple::NodeStore::DatabaseShardImp::getCompleteShards
std::string getCompleteShards() override
Query which complete shards are stored.
Definition: DatabaseShardImp.cpp:645
ripple::Application::getLedgerMaster
virtual LedgerMaster & getLedgerMaster()=0
ripple::PublicKey
A public key.
Definition: PublicKey.h:59
ripple::Config
Definition: Config.h:67
ripple::NodeStore::DatabaseShardImp::shards_
std::map< std::uint32_t, ShardInfo > shards_
Definition: DatabaseShardImp.h:213
ripple::NodeStore::Database::fetch
virtual std::shared_ptr< NodeObject > fetch(uint256 const &hash, std::uint32_t seq)=0
Fetch an object.
ripple::NodeStore::DatabaseShardImp::~DatabaseShardImp
~DatabaseShardImp() override
Definition: DatabaseShardImp.cpp:64
ripple::Application::config
virtual Config & config()=0
ripple::NodeStore::DatabaseShardImp::dir_
boost::filesystem::path dir_
Definition: DatabaseShardImp.h:219
ripple::NodeStore::DatabaseShardImp::updateStatus
void updateStatus(std::lock_guard< std::mutex > &)
Definition: DatabaseShardImp.cpp:1445
ripple::Config::standalone
bool standalone() const
Definition: Config.h:233
ripple::NodeStore::seqToShardIndex
constexpr std::uint32_t seqToShardIndex(std::uint32_t seq, std::uint32_t ledgersPerShard=DatabaseShard::ledgersPerShardDefault)
Definition: DatabaseShard.h:190
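The mapping is plain integer division; a worked check, assuming the default of 16384 ledgers per shard and the usual (seq - 1) / ledgersPerShard formula (the declaration above does not show the body):
// Shard 0 holds ledgers 1..16384, shard 1 holds 16385..32768, and so on.
static_assert(seqToShardIndex(1) == 0);
static_assert(seqToShardIndex(16384) == 0);
static_assert(seqToShardIndex(16385) == 1);
// Hedged inverse: firstLedgerSeq(i) == 1 + i * 16384, clamped so the
// earliest shard starts at earliestLedgerSeq().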
ripple::NodeStore::DatabaseShardImp::removeFailedShard
void removeFailedShard(std::shared_ptr< Shard > &shard)
Definition: DatabaseShardImp.cpp:1576
ripple::Application::nodeIdentity
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
ripple::NodeStore::DatabaseShard
A collection of historical shards.
Definition: DatabaseShard.h:37
ripple::NodeStore::DatabaseShardImp::importShard
bool importShard(std::uint32_t shardIndex, boost::filesystem::path const &srcDir) override
Import a shard into the shard database.
Definition: DatabaseShardImp.cpp:398
ripple::NodeStore::Database::storeStats
void storeStats(size_t sz)
Definition: Database.h:250
ripple::default_prng
beast::xor_shift_engine & default_prng()
Return the default random engine.
Definition: ripple/basics/random.h:65
ripple::NodeStore::DatabaseShardImp::PathDesignation::none
@ none
beast::Journal::error
Stream error() const
Definition: Journal.h:333
ripple::NodeStore::DatabaseShardImp::checkHistoricalPaths
bool checkHistoricalPaths() const
Definition: DatabaseShardImp.cpp:1859
ripple::ConfigSection
Definition: ConfigSections.h:28
ripple::NodeStore::DatabaseShardImp::latestShardIndex_
boost::optional< std::uint32_t > latestShardIndex_
Definition: DatabaseShardImp.h:260
beast::Journal
A generic endpoint for log messages.
Definition: Journal.h:58
ripple::NodeStore::Database::earliestLedgerSeq
std::uint32_t earliestLedgerSeq() const
Definition: Database.h:236
ripple::NodeStore::DatabaseShardImp::setFileStats
void setFileStats()
Definition: DatabaseShardImp.cpp:1394
ripple::NodeStore::DatabaseShardImp::acquireIndex_
std::uint32_t acquireIndex_
Definition: DatabaseShardImp.h:216
ripple::Overlay::foreach
void foreach(Function f) const
Visit every active peer.
Definition: Overlay.h:183
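foreach combines with the send_always predicate above to broadcast; a hedged sketch in which `message` is assumed to be a std::shared_ptr<Message> built elsewhere:
// Visit every active peer and send the same message to each.
app.overlay().foreach(send_always(message));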
ripple::NodeStore::Scheduler
Scheduling for asynchronous backend activity.
Definition: ripple/nodestore/Scheduler.h:57
ripple::NodeStore::DatabaseShardImp::findAcquireIndex
boost::optional< std::uint32_t > findAcquireIndex(std::uint32_t validLedgerSeq, std::lock_guard< std::mutex > &)
Definition: DatabaseShardImp.cpp:1226
ripple::NodeStore::Database::fetchInternal
std::shared_ptr< NodeObject > fetchInternal(uint256 const &hash, std::shared_ptr< Backend > backend)
Definition: Database.cpp:123
ripple::NodeStore::DatabaseShardImp::init
bool init() override
Initialize the database.
Definition: DatabaseShardImp.cpp:70
ripple::Serializer
Definition: Serializer.h:39
ripple::NodeStore::DatabaseShardImp::store
void store(NodeObjectType type, Blob &&data, uint256 const &hash, std::uint32_t seq) override
Store the object.
Definition: DatabaseShardImp.cpp:942
ripple::NodeStore::DatabaseShardImp::historicalPaths_
std::vector< boost::filesystem::path > historicalPaths_
Definition: DatabaseShardImp.h:234
ripple::NodeStore::DatabaseShardImp::parent_
Stoppable & parent_
Definition: DatabaseShardImp.h:202
ripple::NodeStore::DatabaseShardImp::importMarker_
static constexpr auto importMarker_
Definition: DatabaseShardImp.h:251
ripple
The top-level namespace for all rippled code.
Definition: RCLCensorshipDetector.h:29
ripple::Serializer::addBitString
int addBitString(base_uint< Bits, Tag > const &v)
Definition: Serializer.h:97
ripple::NodeStore::Database::storeLedger
virtual bool storeLedger(std::shared_ptr< Ledger const > const &srcLedger)=0
Copies a ledger stored in a different database to this one.
ripple::Application::getNodeStore
virtual NodeStore::Database & getNodeStore()=0
ripple::NodeStore::DatabaseShardImp::import
void import(Database &source) override
Import the application's local node store.
Definition: DatabaseShardImp.cpp:701
ripple::NodeStore::DatabaseShardImp::maxHistoricalShards_
std::uint32_t maxHistoricalShards_
Definition: DatabaseShardImp.h:231
ripple::Application::getShardFamily
virtual Family * getShardFamily()=0
ripple::NodeStore::Database::j_
const beast::Journal j_
Definition: Database.h:242
ripple::NodeStore::DatabaseShardImp::fetchLedger
std::shared_ptr< Ledger > fetchLedger(uint256 const &hash, std::uint32_t seq) override
Fetch a ledger from the shard store.
Definition: DatabaseShardImp.cpp:507
ripple::NodeStore::Database::stopThreads
void stopThreads()
Definition: Database.cpp:93
ripple::NodeStore::DatabaseShardImp::numHistoricalShards
std::uint32_t numHistoricalShards(std::lock_guard< std::mutex > const &lock) const
Definition: DatabaseShardImp.cpp:1616
ripple::NodeStore::Database::asyncFetch
virtual bool asyncFetch(uint256 const &hash, std::uint32_t seq, std::shared_ptr< NodeObject > &object)=0
Fetch an object without waiting.
ripple::LedgerMaster::getCurrentLedgerIndex
LedgerIndex getCurrentLedgerIndex()
Definition: LedgerMaster.cpp:206
ripple::NodeStore::DatabaseShardImp::relocateOutdatedShards
void relocateOutdatedShards(std::lock_guard< std::mutex > const &lock)
Definition: DatabaseShardImp.cpp:1630
ripple::Application::overlay
virtual Overlay & overlay()=0
ripple::NodeStore::DatabaseShardImp::validate
void validate() override
Verifies shard store data is valid.
Definition: DatabaseShardImp.cpp:654
beast::Journal::debug
Stream debug() const
Definition: Journal.h:315
ripple::getHashesByIndex
bool getHashesByIndex(std::uint32_t ledgerIndex, uint256 &ledgerHash, uint256 &parentHash, Application &app)
Definition: Ledger.cpp:1287
ripple::NodeStore::DatabaseShardImp::canAdd_
bool canAdd_
Definition: DatabaseShardImp.h:222
ripple::Serializer::add32
int add32(std::uint32_t i)
Definition: Serializer.cpp:38
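add32 and addBitString (listed earlier) are enough to assemble the control record a finalized shard stores under Shard::finalKey; the field order here is an assumption for illustration, not the authoritative layout:
Serializer s;
s.add32(Shard::version);        // shard format version
s.add32(firstSeq);              // first ledger sequence in the shard
s.add32(lastSeq);               // last ledger sequence in the shard
s.addBitString(lastLedgerHash); // hash of the shard's last ledger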
ripple::NodeStore::Database::scheduler_
Scheduler & scheduler_
Definition: Database.h:243
ripple::RangeSet
boost::icl::interval_set< T, std::less, ClosedInterval< T > > RangeSet
A set of closed intervals over the domain T.
Definition: RangeSet.h:69
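RangeSet is the natural structure for tracking which ledger sequences a shard already holds; a sketch assuming the range() helper from RangeSet.h:
// Mark ledgers 16385..32768 (shard 1 under the defaults) as stored; the
// shard is complete once one closed interval covers its whole range.
RangeSet<std::uint32_t> stored;
stored.insert(range(std::uint32_t{16385}, std::uint32_t{32768}));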
ripple::NodeStore::DatabaseShardImp::DatabaseShardImp
DatabaseShardImp()=delete
ripple::NodeStore::DatabaseShardImp::ShardInfo::State::import
@ import
ripple::NodeStore::asyncDivider
@ asyncDivider
Definition: nodestore/impl/Tuning.h:32
ripple::NodeStore::DatabaseShardImp::prepareForNewShard
boost::optional< PathDesignation > prepareForNewShard(std::uint32_t shardIndex, std::uint32_t numHistoricalShards, std::lock_guard< std::mutex > const &lock)
Definition: DatabaseShardImp.cpp:1794
ripple::ConfigSection::nodeDatabase
static std::string nodeDatabase()
Definition: ConfigSections.h:33
ripple::NodeStore::DatabaseShardImp::ShardInfo
Definition: DatabaseShardImp.h:176
ripple::NodeStore::DatabaseShardImp::onStop
void onStop() override
Override called when the stop notification is issued.
Definition: DatabaseShardImp.cpp:682
ripple::NodeStore::DatabaseShardImp::ctx_
std::unique_ptr< nudb::context > ctx_
Definition: DatabaseShardImp.h:207
ripple::BasicConfig::section
Section & section(std::string const &name)
Returns the section with the given name.
Definition: BasicConfig.cpp:138
ripple::Stoppable::isStopping
bool isStopping() const
Returns true if the stoppable should stop.
Definition: Stoppable.cpp:54
ripple::NodeStore::DatabaseShardImp::storeLedgerInShard
bool storeLedgerInShard(std::shared_ptr< Shard > &shard, std::shared_ptr< Ledger const > const &ledger)
Definition: DatabaseShardImp.cpp:1540
ripple::NodeStore::DatabaseShardImp::backendName_
std::string backendName_
Definition: DatabaseShardImp.h:228