rippled
DatabaseShardImp.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2017 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/app/ledger/InboundLedgers.h>
21 #include <ripple/app/ledger/LedgerMaster.h>
22 #include <ripple/app/misc/NetworkOPs.h>
23 #include <ripple/basics/ByteUtilities.h>
24 #include <ripple/basics/chrono.h>
25 #include <ripple/basics/random.h>
26 #include <ripple/core/ConfigSections.h>
27 #include <ripple/nodestore/DummyScheduler.h>
28 #include <ripple/nodestore/impl/DatabaseShardImp.h>
29 #include <ripple/overlay/Overlay.h>
30 #include <ripple/overlay/predicates.h>
31 #include <ripple/protocol/HashPrefix.h>
32 
33 #include <boost/algorithm/string/predicate.hpp>
34 
35 namespace ripple {
36 namespace NodeStore {
37 
39  Application& app,
40  Stoppable& parent,
41  std::string const& name,
42  Scheduler& scheduler,
43  int readThreads,
45  : DatabaseShard(
46  name,
47  parent,
48  scheduler,
49  readThreads,
50  app.config().section(ConfigSection::shardDatabase()),
51  j)
52  , app_(app)
53  , parent_(parent)
54  , taskQueue_(std::make_unique<TaskQueue>(*this))
55  , earliestShardIndex_(seqToShardIndex(earliestLedgerSeq()))
56  , avgShardFileSz_(ledgersPerShard_ * kilobytes(192))
57 {
58 }
59 
61 {
62  onStop();
63 }
64 
65 bool
67 {
68  {
69  std::lock_guard lock(mutex_);
70  if (init_)
71  {
72  JLOG(j_.error()) << "already initialized";
73  return false;
74  }
75 
76  if (!initConfig(lock))
77  {
78  JLOG(j_.error()) << "invalid configuration file settings";
79  return false;
80  }
81 
82  try
83  {
84  using namespace boost::filesystem;
85  if (exists(dir_))
86  {
87  if (!is_directory(dir_))
88  {
89  JLOG(j_.error()) << "'path' must be a directory";
90  return false;
91  }
92  }
93  else
94  create_directories(dir_);
95 
96  ctx_ = std::make_unique<nudb::context>();
97  ctx_->start();
98 
99  // Find shards
100  for (auto const& d : directory_iterator(dir_))
101  {
102  if (!is_directory(d))
103  continue;
104 
105  // Check shard directory name is numeric
106  auto dirName = d.path().stem().string();
107  if (!std::all_of(dirName.begin(), dirName.end(), [](auto c) {
108  return ::isdigit(static_cast<unsigned char>(c));
109  }))
110  {
111  continue;
112  }
113 
114  auto const shardIndex{std::stoul(dirName)};
115  if (shardIndex < earliestShardIndex())
116  {
117  JLOG(j_.error()) << "shard " << shardIndex
118  << " comes before earliest shard index "
119  << earliestShardIndex();
120  return false;
121  }
122 
123  auto const shardDir{dir_ / std::to_string(shardIndex)};
124 
125  // Check if a previous import failed
126  if (is_regular_file(shardDir / importMarker_))
127  {
128  JLOG(j_.warn()) << "shard " << shardIndex
129  << " previously failed import, removing";
130  remove_all(shardDir);
131  continue;
132  }
133 
134  auto shard{
135  std::make_unique<Shard>(app_, *this, shardIndex, j_)};
136  if (!shard->open(scheduler_, *ctx_))
137  {
138  // Remove corrupted or legacy shard
139  shard->removeOnDestroy();
140  JLOG(j_.warn())
141  << "shard " << shardIndex << " removed, "
142  << (shard->isLegacy() ? "legacy" : "corrupted")
143  << " shard";
144  continue;
145  }
146 
147  if (shard->isFinal())
148  {
149  shards_.emplace(
150  shardIndex,
151  ShardInfo(std::move(shard), ShardInfo::State::final));
152  }
153  else if (shard->isBackendComplete())
154  {
155  auto const result{shards_.emplace(
156  shardIndex,
157  ShardInfo(std::move(shard), ShardInfo::State::none))};
159  result.first->second, true, lock, boost::none);
160  }
161  else
162  {
163  if (acquireIndex_ != 0)
164  {
165  JLOG(j_.error())
166  << "more than one shard being acquired";
167  return false;
168  }
169 
170  shards_.emplace(
171  shardIndex,
172  ShardInfo(std::move(shard), ShardInfo::State::acquire));
173  acquireIndex_ = shardIndex;
174  }
175  }
176  }
177  catch (std::exception const& e)
178  {
179  JLOG(j_.error())
180  << "exception " << e.what() << " in function " << __func__;
181  }
182 
183  updateStatus(lock);
185  init_ = true;
186  }
187 
188  setFileStats();
189  return true;
190 }
191 
192 boost::optional<std::uint32_t>
194 {
195  boost::optional<std::uint32_t> shardIndex;
196 
197  {
198  std::lock_guard lock(mutex_);
199  assert(init_);
200 
201  if (acquireIndex_ != 0)
202  {
203  if (auto it{shards_.find(acquireIndex_)}; it != shards_.end())
204  return it->second.shard->prepare();
205  assert(false);
206  return boost::none;
207  }
208 
209  if (!canAdd_)
210  return boost::none;
211 
212  // Check available storage space
214  {
215  JLOG(j_.debug()) << "maximum storage size reached";
216  canAdd_ = false;
217  return boost::none;
218  }
219  if (avgShardFileSz_ > available())
220  {
221  JLOG(j_.error()) << "insufficient storage space available";
222  canAdd_ = false;
223  return boost::none;
224  }
225 
226  shardIndex = findAcquireIndex(validLedgerSeq, lock);
227  }
228 
229  if (!shardIndex)
230  {
231  JLOG(j_.debug()) << "no new shards to add";
232  {
233  std::lock_guard lock(mutex_);
234  canAdd_ = false;
235  }
236  return boost::none;
237  }
238 
239  auto shard{std::make_unique<Shard>(app_, *this, *shardIndex, j_)};
240  if (!shard->open(scheduler_, *ctx_))
241  return boost::none;
242 
243  auto const seq{shard->prepare()};
244  {
245  std::lock_guard lock(mutex_);
246  shards_.emplace(
247  *shardIndex,
248  ShardInfo(std::move(shard), ShardInfo::State::acquire));
249  acquireIndex_ = *shardIndex;
250  }
251  return seq;
252 }
253 
254 bool
256 {
257  auto fail = [j = j_, shardIndex](std::string const& msg) {
258  JLOG(j.error()) << "shard " << shardIndex << " " << msg;
259  return false;
260  };
261  std::lock_guard lock(mutex_);
262  assert(init_);
263 
264  if (!canAdd_)
265  return fail("cannot be stored at this time");
266 
267  if (shardIndex < earliestShardIndex())
268  {
269  return fail(
270  "comes before earliest shard index " +
272  }
273 
274  // If we are synced to the network, check if the shard index
275  // is greater or equal to the current shard.
276  auto seqCheck = [&](std::uint32_t seq) {
277  // seq will be greater than zero if valid
278  if (seq >= earliestLedgerSeq() && shardIndex >= seqToShardIndex(seq))
279  return fail("has an invalid index");
280  return true;
281  };
282  if (!seqCheck(app_.getLedgerMaster().getValidLedgerIndex() + 1) ||
284  {
285  return false;
286  }
287 
288  if (shards_.find(shardIndex) != shards_.end())
289  {
290  JLOG(j_.debug()) << "shard " << shardIndex
291  << " is already stored or queued for import";
292  return false;
293  }
294 
295  // Check available storage space
297  return fail("maximum storage size reached");
298  if (avgShardFileSz_ > available())
299  return fail("insufficient storage space available");
300 
301  shards_.emplace(shardIndex, ShardInfo(nullptr, ShardInfo::State::import));
302  return true;
303 }
304 
305 void
// DatabaseShardImp::removePreShard(std::uint32_t shardIndex)
// NOTE(review): the signature line (original 306) is missing from this
// extract (Doxygen anchor lines were dropped); confirm against the repo.
// Cancels a shard reservation made by prepareShard(): the entry is erased
// only while it is still in the 'import' state (no Shard object attached),
// so an in-progress acquire or finalized shard is never removed here.
307 {
308  std::lock_guard lock(mutex_);
309  assert(init_);
310 
311  if (auto const it{shards_.find(shardIndex)};
312  it != shards_.end() && it->second.state == ShardInfo::State::import)
313  {
314  shards_.erase(it);
315  }
316 }
317 
320 {
322  {
323  std::lock_guard lock(mutex_);
324  assert(init_);
325 
326  for (auto const& e : shards_)
327  if (e.second.state == ShardInfo::State::import)
328  rs.insert(e.first);
329  }
330 
331  if (rs.empty())
332  return {};
333 
334  return to_string(rs);
335 };
336 
337 bool
339  std::uint32_t shardIndex,
340  boost::filesystem::path const& srcDir)
341 {
342  using namespace boost::filesystem;
343  try
344  {
345  if (!is_directory(srcDir) || is_empty(srcDir))
346  {
347  JLOG(j_.error()) << "invalid source directory " << srcDir.string();
348  return false;
349  }
350  }
351  catch (std::exception const& e)
352  {
353  JLOG(j_.error()) << "exception " << e.what() << " in function "
354  << __func__;
355  return false;
356  }
357 
358  auto expectedHash = app_.getLedgerMaster().walkHashBySeq(
360 
361  if (!expectedHash)
362  {
363  JLOG(j_.error()) << "shard " << shardIndex
364  << " expected hash not found";
365  return false;
366  }
367 
368  auto renameDir = [&](path const& src, path const& dst) {
369  try
370  {
371  rename(src, dst);
372  }
373  catch (std::exception const& e)
374  {
375  JLOG(j_.error())
376  << "exception " << e.what() << " in function " << __func__;
377  return false;
378  }
379  return true;
380  };
381 
382  path dstDir;
383  {
384  std::lock_guard lock(mutex_);
385  assert(init_);
386 
387  // Check shard is prepared
388  if (auto const it{shards_.find(shardIndex)}; it == shards_.end() ||
389  it->second.shard || it->second.state != ShardInfo::State::import)
390  {
391  JLOG(j_.error()) << "shard " << shardIndex << " failed to import";
392  return false;
393  }
394 
395  dstDir = dir_ / std::to_string(shardIndex);
396  }
397 
398  // Rename source directory to the shard database directory
399  if (!renameDir(srcDir, dstDir))
400  return false;
401 
402  // Create the new shard
403  auto shard{std::make_unique<Shard>(app_, *this, shardIndex, j_)};
404  if (!shard->open(scheduler_, *ctx_) || !shard->isBackendComplete())
405  {
406  JLOG(j_.error()) << "shard " << shardIndex << " failed to import";
407  shard.reset();
408  renameDir(dstDir, srcDir);
409  return false;
410  }
411 
412  {
413  std::lock_guard lock(mutex_);
414  auto const it{shards_.find(shardIndex)};
415  if (it == shards_.end() || it->second.shard ||
416  it->second.state != ShardInfo::State::import)
417  {
418  JLOG(j_.error()) << "shard " << shardIndex << " failed to import";
419  shard.reset();
420  renameDir(dstDir, srcDir);
421  return false;
422  }
423 
424  it->second.shard = std::move(shard);
425  finalizeShard(it->second, true, lock, expectedHash);
426  }
427 
428  return true;
429 }
430 
433 {
434  auto const shardIndex{seqToShardIndex(seq)};
435  {
437  ShardInfo::State state;
438  {
439  std::lock_guard lock(mutex_);
440  assert(init_);
441 
442  if (auto const it{shards_.find(shardIndex)}; it != shards_.end())
443  {
444  shard = it->second.shard;
445  state = it->second.state;
446  }
447  else
448  return {};
449  }
450 
451  // Check if the ledger is stored in a final shard
452  // or in the shard being acquired
453  switch (state)
454  {
456  break;
458  if (shard->containsLedger(seq))
459  break;
460  [[fallthrough]];
461  default:
462  return {};
463  }
464  }
465 
466  auto nObj{fetch(hash, seq)};
467  if (!nObj)
468  return {};
469 
470  auto fail = [this, seq](std::string const& msg) -> std::shared_ptr<Ledger> {
471  JLOG(j_.error()) << "shard " << seqToShardIndex(seq) << " " << msg;
472  return {};
473  };
474 
475  auto ledger{std::make_shared<Ledger>(
476  deserializePrefixedHeader(makeSlice(nObj->getData())),
477  app_.config(),
478  *app_.getShardFamily())};
479 
480  if (ledger->info().seq != seq)
481  {
482  return fail(
483  "encountered invalid ledger sequence " + std::to_string(seq));
484  }
485  if (ledger->info().hash != hash)
486  {
487  return fail(
488  "encountered invalid ledger hash " + to_string(hash) +
489  " on sequence " + std::to_string(seq));
490  }
491 
492  ledger->setFull();
493  if (!ledger->stateMap().fetchRoot(
494  SHAMapHash{ledger->info().accountHash}, nullptr))
495  {
496  return fail(
497  "is missing root STATE node on hash " + to_string(hash) +
498  " on sequence " + std::to_string(seq));
499  }
500 
501  if (ledger->info().txHash.isNonZero())
502  {
503  if (!ledger->txMap().fetchRoot(
504  SHAMapHash{ledger->info().txHash}, nullptr))
505  {
506  return fail(
507  "is missing root TXN node on hash " + to_string(hash) +
508  " on sequence " + std::to_string(seq));
509  }
510  }
511  return ledger;
512 }
513 
514 void
516 {
517  if (ledger->info().hash.isZero())
518  {
519  JLOG(j_.error()) << "zero ledger hash for ledger sequence "
520  << ledger->info().seq;
521  return;
522  }
523  if (ledger->info().accountHash.isZero())
524  {
525  JLOG(j_.error()) << "zero account hash for ledger sequence "
526  << ledger->info().seq;
527  return;
528  }
529  if (ledger->stateMap().getHash().isNonZero() &&
530  !ledger->stateMap().isValid())
531  {
532  JLOG(j_.error()) << "invalid state map for ledger sequence "
533  << ledger->info().seq;
534  return;
535  }
536  if (ledger->info().txHash.isNonZero() && !ledger->txMap().isValid())
537  {
538  JLOG(j_.error()) << "invalid transaction map for ledger sequence "
539  << ledger->info().seq;
540  return;
541  }
542 
543  auto const shardIndex{seqToShardIndex(ledger->info().seq)};
545  {
546  std::lock_guard lock(mutex_);
547  assert(init_);
548 
549  if (shardIndex != acquireIndex_)
550  {
551  JLOG(j_.trace())
552  << "shard " << shardIndex << " is not being acquired";
553  return;
554  }
555 
556  if (auto const it{shards_.find(shardIndex)}; it != shards_.end())
557  shard = it->second.shard;
558  else
559  {
560  JLOG(j_.error())
561  << "shard " << shardIndex << " is not being acquired";
562  return;
563  }
564  }
565 
566  storeLedgerInShard(shard, ledger);
567 }
568 
// std::string DatabaseShardImp::getCompleteShards()
// NOTE(review): the return-type/name lines (original 569-570) are missing
// from this extract; confirm against the repo.
// Returns status_, the cached textual range of finalized shard indexes,
// which is rebuilt by updateStatus() whenever shards_ changes.
571 {
572  std::lock_guard lock(mutex_);
573  assert(init_);
574 
575  return status_;
576 }
577 
578 void
580 {
582  {
583  std::lock_guard lock(mutex_);
584  assert(init_);
585 
586  // Only shards with a state of final should be validated
587  for (auto& e : shards_)
588  if (e.second.state == ShardInfo::State::final)
589  shards.push_back(e.second.shard);
590 
591  if (shards.empty())
592  return;
593 
594  JLOG(j_.debug()) << "Validating shards " << status_;
595  }
596 
597  for (auto const& e : shards)
598  {
599  if (auto shard{e.lock()}; shard)
600  shard->finalize(true, boost::none);
601  }
602 
604 }
605 
606 void
// DatabaseShardImp::onStop()
// NOTE(review): the name line (original 607) is missing from this extract.
// Stoppable hook: halts the base-class read threads first, then stops and
// releases every Shard so their resources are freed before the data members
// of this object are destroyed.
608 {
609  // Stop read threads in base before data members are destroyed
610  stopThreads();
611 
612  std::lock_guard lock(mutex_);
613  if (shards_.empty())
614  return;
615 
616  // Notify and destroy shards
617  for (auto& e : shards_)
618  {
619  if (e.second.shard)
620  e.second.shard->stop();
621  }
622  shards_.clear();
623 }
624 
625 void
627 {
628  {
629  std::lock_guard lock(mutex_);
630  assert(init_);
631 
632  // Only the application local node store can be imported
633  if (&source != &app_.getNodeStore())
634  {
635  assert(false);
636  JLOG(j_.error()) << "invalid source database";
637  return;
638  }
639 
640  std::uint32_t earliestIndex;
641  std::uint32_t latestIndex;
642  {
643  auto loadLedger = [&](bool ascendSort =
644  true) -> boost::optional<std::uint32_t> {
646  std::uint32_t seq;
647  std::tie(ledger, seq, std::ignore) = loadLedgerHelper(
648  "WHERE LedgerSeq >= " +
650  " order by LedgerSeq " + (ascendSort ? "asc" : "desc") +
651  " limit 1",
652  app_,
653  false);
654  if (!ledger || seq == 0)
655  {
656  JLOG(j_.error()) << "no suitable ledgers were found in"
657  " the SQLite database to import";
658  return boost::none;
659  }
660  return seq;
661  };
662 
663  // Find earliest ledger sequence stored
664  auto seq{loadLedger()};
665  if (!seq)
666  return;
667  earliestIndex = seqToShardIndex(*seq);
668 
669  // Consider only complete shards
670  if (seq != firstLedgerSeq(earliestIndex))
671  ++earliestIndex;
672 
673  // Find last ledger sequence stored
674  seq = loadLedger(false);
675  if (!seq)
676  return;
677  latestIndex = seqToShardIndex(*seq);
678 
679  // Consider only complete shards
680  if (seq != lastLedgerSeq(latestIndex))
681  --latestIndex;
682 
683  if (latestIndex < earliestIndex)
684  {
685  JLOG(j_.error()) << "no suitable ledgers were found in"
686  " the SQLite database to import";
687  return;
688  }
689  }
690 
691  // Import the shards
692  for (std::uint32_t shardIndex = earliestIndex;
693  shardIndex <= latestIndex;
694  ++shardIndex)
695  {
697  {
698  JLOG(j_.error()) << "maximum storage size reached";
699  canAdd_ = false;
700  break;
701  }
702  if (avgShardFileSz_ > available())
703  {
704  JLOG(j_.error()) << "insufficient storage space available";
705  canAdd_ = false;
706  break;
707  }
708 
709  // Skip if already stored
710  if (shardIndex == acquireIndex_ ||
711  shards_.find(shardIndex) != shards_.end())
712  {
713  JLOG(j_.debug()) << "shard " << shardIndex << " already exists";
714  continue;
715  }
716 
717  // Verify SQLite ledgers are in the node store
718  {
719  auto const firstSeq{firstLedgerSeq(shardIndex)};
720  auto const lastSeq{
721  std::max(firstSeq, lastLedgerSeq(shardIndex))};
722  auto const numLedgers{
723  shardIndex == earliestShardIndex() ? lastSeq - firstSeq + 1
724  : ledgersPerShard_};
725  auto ledgerHashes{getHashesByIndex(firstSeq, lastSeq, app_)};
726  if (ledgerHashes.size() != numLedgers)
727  continue;
728 
729  bool valid{true};
730  for (std::uint32_t n = firstSeq; n <= lastSeq; n += 256)
731  {
732  if (!source.fetch(ledgerHashes[n].first, n))
733  {
734  JLOG(j_.warn()) << "SQLite ledger sequence " << n
735  << " mismatches node store";
736  valid = false;
737  break;
738  }
739  }
740  if (!valid)
741  continue;
742  }
743 
744  // Create the new shard
745  auto shard{std::make_unique<Shard>(app_, *this, shardIndex, j_)};
746  if (!shard->open(scheduler_, *ctx_))
747  continue;
748 
749  // Create a marker file to signify an import in progress
750  auto const shardDir{dir_ / std::to_string(shardIndex)};
751  auto const markerFile{shardDir / importMarker_};
752  {
753  std::ofstream ofs{markerFile.string()};
754  if (!ofs.is_open())
755  {
756  JLOG(j_.error()) << "shard " << shardIndex
757  << " failed to create temp marker file";
758  shard->removeOnDestroy();
759  continue;
760  }
761  ofs.close();
762  }
763 
764  // Copy the ledgers from node store
765  std::shared_ptr<Ledger> recentStored;
766  boost::optional<uint256> lastLedgerHash;
767 
768  while (auto seq = shard->prepare())
769  {
770  auto ledger{loadByIndex(*seq, app_, false)};
771  if (!ledger || ledger->info().seq != seq)
772  break;
773 
775  *ledger,
776  shard->getBackend(),
777  nullptr,
778  nullptr,
779  recentStored))
780  {
781  break;
782  }
783 
784  if (!shard->store(ledger))
785  break;
786 
787  if (!lastLedgerHash && seq == lastLedgerSeq(shardIndex))
788  lastLedgerHash = ledger->info().hash;
789 
790  recentStored = ledger;
791  }
792 
793  using namespace boost::filesystem;
794  if (lastLedgerHash && shard->isBackendComplete())
795  {
796  // Store shard final key
797  Serializer s;
799  s.add32(firstLedgerSeq(shardIndex));
800  s.add32(lastLedgerSeq(shardIndex));
801  s.addBitString(*lastLedgerHash);
802  auto nObj{NodeObject::createObject(
803  hotUNKNOWN, std::move(s.modData()), Shard::finalKey)};
804 
805  try
806  {
807  shard->getBackend()->store(nObj);
808 
809  // The import process is complete and the
810  // marker file is no longer required
811  remove_all(markerFile);
812 
813  JLOG(j_.debug()) << "shard " << shardIndex
814  << " was successfully imported";
815 
816  auto const result{shards_.emplace(
817  shardIndex,
818  ShardInfo(std::move(shard), ShardInfo::State::none))};
820  result.first->second, true, lock, boost::none);
821  }
822  catch (std::exception const& e)
823  {
824  JLOG(j_.error()) << "exception " << e.what()
825  << " in function " << __func__;
826  shard->removeOnDestroy();
827  }
828  }
829  else
830  {
831  JLOG(j_.error())
832  << "shard " << shardIndex << " failed to import";
833  shard->removeOnDestroy();
834  }
835  }
836 
837  updateStatus(lock);
838  }
839 
840  setFileStats();
841 }
842 
// DatabaseShardImp::getWriteLoad()
// NOTE(review): the return-type/name lines (original 843-844) and the local
// 'shard' declaration (original 846) are missing from this extract; confirm
// against the repo.
// Reports the backend write load of the shard currently being acquired
// (acquireIndex_), or 0 when no shard is under acquisition. The shared_ptr
// is copied out under the lock so getWriteLoad() runs without holding mutex_.
845 {
847  {
848  std::lock_guard lock(mutex_);
849  assert(init_);
850 
851  if (auto const it{shards_.find(acquireIndex_)}; it != shards_.end())
852  shard = it->second.shard;
853  else
854  return 0;
855  }
856 
857  return shard->getBackend()->getWriteLoad();
858 }
859 
860 void
862  NodeObjectType type,
863  Blob&& data,
864  uint256 const& hash,
865  std::uint32_t seq)
866 {
867  auto const shardIndex{seqToShardIndex(seq)};
869  {
870  std::lock_guard lock(mutex_);
871  assert(init_);
872 
873  if (shardIndex != acquireIndex_)
874  {
875  JLOG(j_.trace())
876  << "shard " << shardIndex << " is not being acquired";
877  return;
878  }
879 
880  if (auto const it{shards_.find(shardIndex)}; it != shards_.end())
881  shard = it->second.shard;
882  else
883  {
884  JLOG(j_.error())
885  << "shard " << shardIndex << " is not being acquired";
886  return;
887  }
888  }
889 
890  auto [backend, pCache, nCache] = shard->getBackendAll();
891  auto nObj{NodeObject::createObject(type, std::move(data), hash)};
892 
893  pCache->canonicalize_replace_cache(hash, nObj);
894  backend->store(nObj);
895  nCache->erase(hash);
896 
897  storeStats(nObj->getData().size());
898 }
899 
// std::shared_ptr<NodeObject> DatabaseShardImp::fetch(hash, seq)
// NOTE(review): the signature lines (original 900-901) are missing from
// this extract; confirm against the repo.
// Synchronous fetch: resolves the per-shard cache pair for 'seq' and
// delegates to Database::doFetch; returns an empty pointer when the shard
// holding 'seq' is not available.
902 {
903  auto cache{getCache(seq)};
904  if (cache.first)
905  return doFetch(hash, seq, *cache.first, *cache.second, false);
906  return {};
907 }
908 
909 bool
// DatabaseShardImp::asyncFetch(hash, seq, object)
// NOTE(review): the name line (original 910) and the out-parameter line
// declaring 'object' (original 913) are missing from this extract; confirm
// against the repo.
// Returns true when the result is already known: either the object was in
// the positive cache (set into 'object') or its absence is recorded in the
// negative cache. Otherwise schedules an asynchronous read via the base
// class and returns false.
911  uint256 const& hash,
912  std::uint32_t seq,
914 {
915  auto cache{getCache(seq)};
916  if (cache.first)
917  {
918  // See if the object is in cache
919  object = cache.first->fetch(hash);
920  if (object || cache.second->touch_if_exists(hash))
921  return true;
922  // Otherwise post a read
923  Database::asyncFetch(hash, seq, cache.first, cache.second);
924  }
925  return false;
926 }
927 
928 bool
930 {
931  auto const seq{srcLedger->info().seq};
932  auto const shardIndex{seqToShardIndex(seq)};
934  {
935  std::lock_guard lock(mutex_);
936  assert(init_);
937 
938  if (shardIndex != acquireIndex_)
939  {
940  JLOG(j_.trace())
941  << "shard " << shardIndex << " is not being acquired";
942  return false;
943  }
944 
945  if (auto const it{shards_.find(shardIndex)}; it != shards_.end())
946  shard = it->second.shard;
947  else
948  {
949  JLOG(j_.error())
950  << "shard " << shardIndex << " is not being acquired";
951  return false;
952  }
953  }
954 
955  if (shard->containsLedger(seq))
956  {
957  JLOG(j_.trace()) << "shard " << shardIndex << " ledger already stored";
958  return false;
959  }
960 
961  {
962  auto [backend, pCache, nCache] = shard->getBackendAll();
964  *srcLedger, backend, pCache, nCache, nullptr))
965  {
966  return false;
967  }
968  }
969 
970  return storeLedgerInShard(shard, srcLedger);
971 }
972 
973 int
// DatabaseShardImp::getDesiredAsyncReadCount(seq)
// NOTE(review): the name line (original 974) and the local 'shard'
// declaration (original 977) are missing from this extract; confirm
// against the repo.
// Returns how many async reads are worthwhile for the shard holding 'seq':
// a fraction of that shard's positive-cache target size, but only for
// shards in the 'final' or 'acquire' state; 0 otherwise.
975 {
976  auto const shardIndex{seqToShardIndex(seq)};
978  {
979  std::lock_guard lock(mutex_);
980  assert(init_);
981 
982  if (auto const it{shards_.find(shardIndex)}; it != shards_.end() &&
983  (it->second.state == ShardInfo::State::final ||
984  it->second.state == ShardInfo::State::acquire))
985  {
986  shard = it->second.shard;
987  }
988  else
989  return 0;
990  }
991 
992  return shard->pCache()->getTargetSize() / asyncDivider;
993 }
994 
995 float
// DatabaseShardImp::getCacheHitRate()
// NOTE(review): the name line (original 996) and the local 'shard'
// declaration (original 998) are missing from this extract; confirm
// against the repo.
// Returns the positive-cache hit rate of the shard currently being
// acquired, or 0 when no shard is under acquisition.
997 {
999  {
1000  std::lock_guard lock(mutex_);
1001  assert(init_);
1002 
1003  if (auto const it{shards_.find(acquireIndex_)}; it != shards_.end())
1004  shard = it->second.shard;
1005  else
1006  return 0;
1007  }
1008 
1009  return shard->pCache()->getHitRate();
1010 }
1011 
1012 void
// DatabaseShardImp::sweep()
// NOTE(review): the name line (original 1013) and the local vector of
// weak_ptr<Shard> (original 1015) are missing from this extract; confirm
// against the repo.
// Collects weak references to all final/acquire shards while holding
// mutex_, then sweeps each shard's caches outside the lock so slow cache
// maintenance never blocks other shard-store operations. weak_ptr::lock()
// skips shards destroyed in the meantime.
1014 {
1016  {
1017  std::lock_guard lock(mutex_);
1018  assert(init_);
1019 
1020  for (auto const& e : shards_)
1021  if (e.second.state == ShardInfo::State::final ||
1022  e.second.state == ShardInfo::State::acquire)
1023  {
1024  shards.push_back(e.second.shard);
1025  }
1026  }
1027 
1028  for (auto const& e : shards)
1029  {
1030  if (auto shard{e.lock()}; shard)
1031  shard->sweep();
1032  }
1033 }
1034 
1035 bool
1037 {
1038  auto fail = [j = j_](std::string const& msg) {
1039  JLOG(j.error()) << "[" << ConfigSection::shardDatabase() << "] " << msg;
1040  return false;
1041  };
1042 
1043  Config const& config{app_.config()};
1044  Section const& section{config.section(ConfigSection::shardDatabase())};
1045 
1046  {
1047  // The earliest ledger sequence defaults to XRP_LEDGER_EARLIEST_SEQ.
1048  // A custom earliest ledger sequence can be set through the
1049  // configuration file using the 'earliest_seq' field under the
1050  // 'node_db' and 'shard_db' stanzas. If specified, this field must
1051  // have a value greater than zero and be equally assigned in
1052  // both stanzas.
1053 
1054  std::uint32_t shardDBEarliestSeq{0};
1055  get_if_exists<std::uint32_t>(
1056  section, "earliest_seq", shardDBEarliestSeq);
1057 
1058  std::uint32_t nodeDBEarliestSeq{0};
1059  get_if_exists<std::uint32_t>(
1060  config.section(ConfigSection::nodeDatabase()),
1061  "earliest_seq",
1062  nodeDBEarliestSeq);
1063 
1064  if (shardDBEarliestSeq != nodeDBEarliestSeq)
1065  {
1066  return fail(
1067  "and [" + ConfigSection::nodeDatabase() +
1068  "] define different 'earliest_seq' values");
1069  }
1070  }
1071 
1072  using namespace boost::filesystem;
1073  if (!get_if_exists<path>(section, "path", dir_))
1074  return fail("'path' missing");
1075 
1076  {
1077  std::uint64_t sz;
1078  if (!get_if_exists<std::uint64_t>(section, "max_size_gb", sz))
1079  return fail("'max_size_gb' missing");
1080 
1081  if ((sz << 30) < sz)
1082  return fail("'max_size_gb' overflow");
1083 
1084  // Minimum storage space required (in gigabytes)
1085  if (sz < 10)
1086  return fail("'max_size_gb' must be at least 10");
1087 
1088  // Convert to bytes
1089  maxFileSz_ = sz << 30;
1090  }
1091 
1092  if (section.exists("ledgers_per_shard"))
1093  {
1094  // To be set only in standalone for testing
1095  if (!config.standalone())
1096  return fail("'ledgers_per_shard' only honored in stand alone");
1097 
1098  ledgersPerShard_ = get<std::uint32_t>(section, "ledgers_per_shard");
1099  if (ledgersPerShard_ == 0 || ledgersPerShard_ % 256 != 0)
1100  return fail("'ledgers_per_shard' must be a multiple of 256");
1101 
1104  }
1105 
1106  // NuDB is the default and only supported permanent storage backend
1107  backendName_ = get<std::string>(section, "type", "nudb");
1108  if (!boost::iequals(backendName_, "NuDB"))
1109  return fail("'type' value unsupported");
1110 
1111  return true;
1112 }
1113 
// std::shared_ptr<NodeObject> DatabaseShardImp::fetchFrom(hash, seq)
// NOTE(review): the signature lines (original 1114-1115) are missing from
// this extract; confirm against the repo.
// Looks up the shard holding 'seq' (it must have an open Shard object) and
// fetches the object directly from that shard's backend via fetchInternal,
// bypassing the caches. The shared_ptr is copied out under the lock so the
// backend read runs without holding mutex_.
1116 {
1117  auto const shardIndex{seqToShardIndex(seq)};
1118  std::shared_ptr<Shard> shard;
1119  {
1120  std::lock_guard lock(mutex_);
1121  assert(init_);
1122 
1123  if (auto const it{shards_.find(shardIndex)};
1124  it != shards_.end() && it->second.shard)
1125  {
1126  shard = it->second.shard;
1127  }
1128  else
1129  return {};
1130  }
1131 
1132  return fetchInternal(hash, shard->getBackend());
1133 }
1134 
1135 boost::optional<std::uint32_t>
1137  std::uint32_t validLedgerSeq,
1139 {
1140  if (validLedgerSeq < earliestLedgerSeq())
1141  return boost::none;
1142 
1143  auto const maxShardIndex{[this, validLedgerSeq]() {
1144  auto shardIndex{seqToShardIndex(validLedgerSeq)};
1145  if (validLedgerSeq != lastLedgerSeq(shardIndex))
1146  --shardIndex;
1147  return shardIndex;
1148  }()};
1149  auto const maxNumShards{maxShardIndex - earliestShardIndex() + 1};
1150 
1151  // Check if the shard store has all shards
1152  if (shards_.size() >= maxNumShards)
1153  return boost::none;
1154 
1155  if (maxShardIndex < 1024 ||
1156  static_cast<float>(shards_.size()) / maxNumShards > 0.5f)
1157  {
1158  // Small or mostly full index space to sample
1159  // Find the available indexes and select one at random
1161  available.reserve(maxNumShards - shards_.size());
1162 
1163  for (auto shardIndex = earliestShardIndex();
1164  shardIndex <= maxShardIndex;
1165  ++shardIndex)
1166  {
1167  if (shards_.find(shardIndex) == shards_.end())
1168  available.push_back(shardIndex);
1169  }
1170 
1171  if (available.empty())
1172  return boost::none;
1173 
1174  if (available.size() == 1)
1175  return available.front();
1176 
1177  return available[rand_int(
1178  0u, static_cast<std::uint32_t>(available.size() - 1))];
1179  }
1180 
1181  // Large, sparse index space to sample
1182  // Keep choosing indexes at random until an available one is found
1183  // chances of running more than 30 times is less than 1 in a billion
1184  for (int i = 0; i < 40; ++i)
1185  {
1186  auto const shardIndex{rand_int(earliestShardIndex(), maxShardIndex)};
1187  if (shards_.find(shardIndex) == shards_.end())
1188  return shardIndex;
1189  }
1190 
1191  assert(false);
1192  return boost::none;
1193 }
1194 
1195 void
1197  ShardInfo& shardInfo,
1198  bool writeSQLite,
1200  boost::optional<uint256> const& expectedHash)
1201 {
1202  assert(shardInfo.shard);
1203  assert(shardInfo.shard->index() != acquireIndex_);
1204  assert(shardInfo.shard->isBackendComplete());
1205  assert(shardInfo.state != ShardInfo::State::finalize);
1206 
1207  auto const shardIndex{shardInfo.shard->index()};
1208 
1209  shardInfo.state = ShardInfo::State::finalize;
1210  taskQueue_->addTask([this, shardIndex, writeSQLite, expectedHash]() {
1211  if (isStopping())
1212  return;
1213 
1214  std::shared_ptr<Shard> shard;
1215  {
1216  std::lock_guard lock(mutex_);
1217  if (auto const it{shards_.find(shardIndex)}; it != shards_.end())
1218  {
1219  shard = it->second.shard;
1220  }
1221  else
1222  {
1223  JLOG(j_.error()) << "Unable to finalize shard " << shardIndex;
1224  return;
1225  }
1226  }
1227 
1228  if (!shard->finalize(writeSQLite, expectedHash))
1229  {
1230  if (isStopping())
1231  return;
1232 
1233  // Invalid or corrupt shard, remove it
1234  removeFailedShard(shard);
1235  return;
1236  }
1237 
1238  if (isStopping())
1239  return;
1240 
1241  {
1242  std::lock_guard lock(mutex_);
1243  auto const it{shards_.find(shardIndex)};
1244  if (it == shards_.end())
1245  return;
1246  it->second.state = ShardInfo::State::final;
1247  updateStatus(lock);
1248  }
1249 
1250  setFileStats();
1251 
1252  // Update peers with new shard index
1253  if (!app_.config().standalone() &&
1255  {
1256  protocol::TMPeerShardInfo message;
1257  PublicKey const& publicKey{app_.nodeIdentity().first};
1258  message.set_nodepubkey(publicKey.data(), publicKey.size());
1259  message.set_shardindexes(std::to_string(shardIndex));
1260  app_.overlay().foreach(send_always(std::make_shared<Message>(
1261  message, protocol::mtPEER_SHARD_INFO)));
1262  }
1263  });
1264 }
1265 
1266 void
1268 {
1270  {
1271  std::lock_guard lock(mutex_);
1272  assert(init_);
1273 
1274  if (shards_.empty())
1275  return;
1276 
1277  for (auto const& e : shards_)
1278  if (e.second.shard)
1279  shards.push_back(e.second.shard);
1280  }
1281 
1282  std::uint64_t sumSz{0};
1283  std::uint32_t sumFd{0};
1284  std::uint32_t numShards{0};
1285  for (auto const& e : shards)
1286  {
1287  if (auto shard{e.lock()}; shard)
1288  {
1289  auto [sz, fd] = shard->fileInfo();
1290  sumSz += sz;
1291  sumFd += fd;
1292  ++numShards;
1293  }
1294  }
1295 
1296  std::lock_guard lock(mutex_);
1297  fileSz_ = sumSz;
1298  fdRequired_ = sumFd;
1299  avgShardFileSz_ = (numShards == 0 ? fileSz_ : fileSz_ / numShards);
1300 
1301  if (fileSz_ >= maxFileSz_)
1302  {
1303  JLOG(j_.warn()) << "maximum storage size reached";
1304  canAdd_ = false;
1305  }
1306  else if (maxFileSz_ - fileSz_ > available())
1307  {
1308  JLOG(j_.warn())
1309  << "maximum shard store size exceeds available storage space";
1310  }
1311 }
1312 
1313 void
// DatabaseShardImp::updateStatus(lock)
// NOTE(review): the name line (original 1314) and the RangeSet declaration
// for 'rs' (original 1318) are missing from this extract; confirm against
// the repo.
// Rebuilds status_, the cached textual range of finalized shard indexes
// returned by getCompleteShards(). The caller must already hold mutex_
// (enforced by the lock_guard parameter in the full signature).
1315 {
1316  if (!shards_.empty())
1317  {
1319  for (auto const& e : shards_)
1320  if (e.second.state == ShardInfo::State::final)
1321  rs.insert(e.second.shard->index());
1322  status_ = to_string(rs);
1323  }
1324  else
1325  status_.clear();
1326 }
1327 
// DatabaseShardImp::getCache(seq)
// NOTE(review): the signature lines (original 1328-1329) are missing from
// this extract; confirm against the repo.
// Returns the positive/negative cache pair of the shard holding 'seq', or
// empty shared_ptrs when that shard has no open Shard object. The Shard is
// copied out under the lock; getBackendAll() is then called without
// holding mutex_.
1330 {
1331  auto const shardIndex{seqToShardIndex(seq)};
1332  std::shared_ptr<Shard> shard;
1333  {
1334  std::lock_guard lock(mutex_);
1335  assert(init_);
1336 
1337  if (auto const it{shards_.find(shardIndex)};
1338  it != shards_.end() && it->second.shard)
1339  {
1340  shard = it->second.shard;
1341  }
1342  else
1343  return {};
1344  }
1345 
1346  std::shared_ptr<PCache> pCache;
1347  std::shared_ptr<NCache> nCache;
1348  std::tie(std::ignore, pCache, nCache) = shard->getBackendAll();
1349 
1350  return std::make_pair(pCache, nCache);
1351 }
1352 
// DatabaseShardImp::available()
// NOTE(review): the return-type/name lines (original 1353-1354) are missing
// from this extract; confirm against the repo.
// Free space (in bytes) on the filesystem holding the shard directory.
// Returns 0 on any filesystem error so callers treat failure as
// "no space available" rather than crashing.
1355 {
1356  try
1357  {
1358  return boost::filesystem::space(dir_).available;
1359  }
1360  catch (std::exception const& e)
1361  {
1362  JLOG(j_.error()) << "exception " << e.what() << " in function "
1363  << __func__;
1364  return 0;
1365  }
1366 }
1367 
1368 bool
// DatabaseShardImp::storeLedgerInShard(shard, ledger)
// NOTE(review): the name line (original 1369) is missing from this extract;
// confirm against the repo.
// Stores 'ledger' into 'shard'. On store failure the shard is treated as
// corrupt and evicted via removeFailedShard(). If the store completes the
// shard's backend, the shard is handed to finalizeShard() (clearing
// acquireIndex_ when it was the shard being acquired) unless finalization
// is already underway. Always refreshes file statistics before returning.
1370  std::shared_ptr<Shard>& shard,
1371  std::shared_ptr<Ledger const> const& ledger)
1372 {
1373  bool result{true};
1374 
1375  if (!shard->store(ledger))
1376  {
1377  // Invalid or corrupt shard, remove it
1378  removeFailedShard(shard);
1379  result = false;
1380  }
1381  else if (shard->isBackendComplete())
1382  {
1383  std::lock_guard lock(mutex_);
1384 
1385  if (auto const it{shards_.find(shard->index())}; it != shards_.end())
1386  {
1387  if (shard->index() == acquireIndex_)
1388  acquireIndex_ = 0;
1389 
1390  if (it->second.state != ShardInfo::State::finalize)
1391  finalizeShard(it->second, false, lock, boost::none);
1392  }
1393  else
1394  {
1395  JLOG(j_.debug())
1396  << "shard " << shard->index() << " is no longer being acquired";
1397  }
1398  }
1399 
1400  setFileStats();
1401  return result;
1402 }
1403 
1404 void
// DatabaseShardImp::removeFailedShard(shard)
// NOTE(review): the name line (original 1405) is missing from this extract;
// confirm against the repo.
// Evicts an invalid or corrupt shard: clears acquireIndex_ if it was the
// shard being acquired, erases it from shards_ (refreshing status_ only if
// the shard had been finalized), then marks its on-disk files for removal,
// releases the caller's reference, and refreshes file statistics.
1406 {
1407  {
1408  std::lock_guard lock(mutex_);
1409 
1410  if (shard->index() == acquireIndex_)
1411  acquireIndex_ = 0;
1412 
1413  if ((shards_.erase(shard->index()) > 0) && shard->isFinal())
1414  updateStatus(lock);
1415  }
1416 
1417  shard->removeOnDestroy();
1418  shard.reset();
1419  setFileStats();
1420 }
1421 
1422 //------------------------------------------------------------------------------
1423 
1426  Application& app,
1427  Stoppable& parent,
1428  Scheduler& scheduler,
1429  int readThreads,
1430  beast::Journal j)
1431 {
1432  // The shard store is optional. Future changes will require it.
1433  Section const& section{
1435  if (section.empty())
1436  return nullptr;
1437 
1438  return std::make_unique<DatabaseShardImp>(
1439  app, parent, "ShardStore", scheduler, readThreads, j);
1440 }
1441 
1442 } // namespace NodeStore
1443 } // namespace ripple
ripple::NodeStore::DatabaseShardImp::ShardInfo::State
State
Definition: DatabaseShardImp.h:178
ripple::NodeStore::make_ShardStore
std::unique_ptr< DatabaseShard > make_ShardStore(Application &app, Stoppable &parent, Scheduler &scheduler, int readThreads, beast::Journal j)
Definition: DatabaseShardImp.cpp:1425
ripple::Section
Holds a collection of configuration values.
Definition: BasicConfig.h:43
ripple::loadLedgerHelper
std::tuple< std::shared_ptr< Ledger >, std::uint32_t, uint256 > loadLedgerHelper(std::string const &sqlSuffix, Application &app, bool acquire)
Definition: Ledger.cpp:1133
ripple::Application
Definition: Application.h:97
ripple::NodeStore::DatabaseShardImp::earliestShardIndex_
std::uint32_t earliestShardIndex_
Definition: DatabaseShardImp.h:237
ripple::NodeStore::DatabaseShardImp::ledgersPerShard_
std::uint32_t ledgersPerShard_
Definition: DatabaseShardImp.h:234
ripple::NodeStore::DatabaseShardImp::finalizeShard
void finalizeShard(ShardInfo &shardInfo, bool writeSQLite, std::lock_guard< std::mutex > &, boost::optional< uint256 > const &expectedHash)
Definition: DatabaseShardImp.cpp:1196
ripple::hotUNKNOWN
@ hotUNKNOWN
Definition: NodeObject.h:33
ripple::NodeStore::DatabaseShardImp::lastLedgerSeq
std::uint32_t lastLedgerSeq(std::uint32_t shardIndex) const override
Calculates the last ledger sequence for a given shard index.
Definition: DatabaseShardImp.h:113
ripple::NodeStore::DatabaseShardImp::earliestShardIndex
std::uint32_t earliestShardIndex() const override
Definition: DatabaseShardImp.h:91
ripple::makeSlice
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:240
ripple::NodeStore::DatabaseShardImp::mutex_
std::mutex mutex_
Definition: DatabaseShardImp.h:198
ripple::NodeStore::DatabaseShardImp::prepareLedger
boost::optional< std::uint32_t > prepareLedger(std::uint32_t validLedgerSeq) override
Prepare to store a new ledger in the shard being acquired.
Definition: DatabaseShardImp.cpp:193
ripple::NodeStore::DatabaseShardImp::app_
Application & app_
Definition: DatabaseShardImp.h:196
ripple::NodeStore::DatabaseShardImp::storeLedger
bool storeLedger(std::shared_ptr< Ledger const > const &srcLedger) override
Copies a ledger stored in a different database to this one.
Definition: DatabaseShardImp.cpp:929
ripple::NodeStore::Database
Persistency layer for NodeObject.
Definition: Database.h:53
std::string
STL class.
ripple::NodeStore::DatabaseShardImp::fetch
std::shared_ptr< NodeObject > fetch(uint256 const &hash, std::uint32_t seq) override
Fetch an object.
Definition: DatabaseShardImp.cpp:901
std::shared_ptr< Ledger >
ripple::NodeStore::Database::doFetch
std::shared_ptr< NodeObject > doFetch(uint256 const &hash, std::uint32_t seq, TaggedCache< uint256, NodeObject > &pCache, KeyCache< uint256 > &nCache, bool isAsync)
Definition: Database.cpp:184
ripple::loadByIndex
std::shared_ptr< Ledger > loadByIndex(std::uint32_t ledgerIndex, Application &app, bool acquire)
Definition: Ledger.cpp:1225
ripple::NodeStore::DatabaseShardImp::seqToShardIndex
std::uint32_t seqToShardIndex(std::uint32_t seq) const override
Calculates the shard index for a given ledger sequence.
Definition: DatabaseShardImp.h:97
std::exception
STL class.
std::stoul
T stoul(T... args)
beast::Journal::trace
Stream trace() const
Severity stream access functions.
Definition: Journal.h:309
ripple::NodeStore::DatabaseShardImp::removeFailedShard
void removeFailedShard(std::shared_ptr< Shard > shard)
Definition: DatabaseShardImp.cpp:1405
ripple::NodeStore::DatabaseShardImp::ShardInfo::State::none
@ none
ripple::Family::reset
virtual void reset()=0
ripple::Serializer::modData
Blob & modData()
Definition: Serializer.h:176
std::pair
ripple::NodeStore::DatabaseShardImp::getCache
std::pair< std::shared_ptr< PCache >, std::shared_ptr< NCache > > getCache(std::uint32_t seq)
Definition: DatabaseShardImp.cpp:1329
ripple::NodeStore::DatabaseShardImp::removePreShard
void removePreShard(std::uint32_t shardIndex) override
Remove a previously prepared shard index for import.
Definition: DatabaseShardImp.cpp:306
ripple::LedgerMaster::getValidLedgerIndex
LedgerIndex getValidLedgerIndex()
Definition: LedgerMaster.cpp:212
ripple::NodeStore::Database::fdRequired_
int fdRequired_
Definition: Database.h:244
ripple::NodeStore::DatabaseShardImp::fileSz_
std::uint64_t fileSz_
Definition: DatabaseShardImp.h:229
ripple::InboundLedger::Reason::GENERIC
@ GENERIC
std::vector
STL class.
ripple::ConfigSection::shardDatabase
static std::string shardDatabase()
Definition: ConfigSections.h:38
ripple::NodeObjectType
NodeObjectType
The types of node objects.
Definition: NodeObject.h:32
ripple::NodeObject::createObject
static std::shared_ptr< NodeObject > createObject(NodeObjectType type, Blob &&data, uint256 const &hash)
Create an object from fields.
Definition: NodeObject.cpp:37
ripple::NodeStore::DatabaseShardImp::taskQueue_
std::unique_ptr< TaskQueue > taskQueue_
Definition: DatabaseShardImp.h:205
ripple::NodeStore::DatabaseShardImp::ShardInfo::state
State state
Definition: DatabaseShardImp.h:193
ripple::NodeStore::DatabaseShardImp::setStored
void setStored(std::shared_ptr< Ledger const > const &ledger) override
Notifies the database that the given ledger has been fully acquired and stored.
Definition: DatabaseShardImp.cpp:515
ripple::NodeStore::DatabaseShardImp::ShardInfo::State::acquire
@ acquire
beast::Journal::warn
Stream warn() const
Definition: Journal.h:327
ripple::NodeStore::DatabaseShardImp::prepareShard
bool prepareShard(std::uint32_t shardIndex) override
Prepare a shard index to be imported into the database.
Definition: DatabaseShardImp.cpp:255
std::lock_guard
STL class.
ripple::kilobytes
constexpr auto kilobytes(T value) noexcept
Definition: ByteUtilities.h:27
ripple::NetworkOPs::getOperatingMode
virtual OperatingMode getOperatingMode() const =0
ripple::NodeStore::DatabaseShardImp::available
std::uint64_t available() const
Definition: DatabaseShardImp.cpp:1354
ripple::LedgerMaster::walkHashBySeq
boost::optional< LedgerHash > walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason)
Walk to a ledger's hash using the skip list.
Definition: LedgerMaster.cpp:1605
ripple::NodeStore::DatabaseShardImp::ShardInfo::State::final
@ final
ripple::NodeStore::DatabaseShardImp::asyncFetch
bool asyncFetch(uint256 const &hash, std::uint32_t seq, std::shared_ptr< NodeObject > &object) override
Fetch an object without waiting.
Definition: DatabaseShardImp.cpp:910
std::all_of
T all_of(T... args)
ripple::to_string
std::string to_string(ListDisposition disposition)
Definition: ValidatorList.cpp:42
ripple::NodeStore::Shard::finalKey
static const uint256 finalKey
Definition: Shard.h:158
std::shared_ptr::reset
T reset(T... args)
ripple::SHAMapHash
Definition: SHAMapTreeNode.h:43
ripple::Application::getOPs
virtual NetworkOPs & getOPs()=0
ripple::NodeStore::DatabaseShardImp::sweep
void sweep() override
Remove expired entries from the positive and negative caches.
Definition: DatabaseShardImp.cpp:1013
std::string::clear
T clear(T... args)
ripple::NodeStore::DatabaseShardImp::ShardInfo::State::finalize
@ finalize
ripple::send_always
Sends a message to all peers.
Definition: predicates.h:31
ripple::Stoppable::setParent
void setParent(Stoppable &parent)
Set the parent of this Stoppable.
Definition: Stoppable.cpp:43
ripple::deserializePrefixedHeader
LedgerInfo deserializePrefixedHeader(Slice data)
Deserialize a ledger header (prefixed with 4 bytes) from a byte array.
Definition: InboundLedger.cpp:292
ripple::NodeStore::Shard::version
static constexpr std::uint32_t version
Definition: Shard.h:153
std::tie
T tie(T... args)
std::vector::push_back
T push_back(T... args)
ripple::NodeStore::DatabaseShardImp::getDesiredAsyncReadCount
int getDesiredAsyncReadCount(std::uint32_t seq) override
Get the maximum number of async reads the node store prefers.
Definition: DatabaseShardImp.cpp:974
ripple::NodeStore::DatabaseShardImp::firstLedgerSeq
std::uint32_t firstLedgerSeq(std::uint32_t shardIndex) const override
Calculates the first ledger sequence for a given shard index.
Definition: DatabaseShardImp.h:104
ripple::NodeStore::DatabaseShardImp::getCacheHitRate
float getCacheHitRate() override
Get the positive cache hits to total attempts ratio.
Definition: DatabaseShardImp.cpp:996
ripple::NodeStore::DatabaseShardImp::avgShardFileSz_
std::uint64_t avgShardFileSz_
Definition: DatabaseShardImp.h:240
ripple::base_uint< 256 >
ripple::NodeStore::DatabaseShardImp::status_
std::string status_
Definition: DatabaseShardImp.h:220
ripple::NodeStore::DatabaseShardImp::fetchFrom
std::shared_ptr< NodeObject > fetchFrom(uint256 const &hash, std::uint32_t seq) override
Definition: DatabaseShardImp.cpp:1115
ripple::NodeStore::DatabaseShardImp::getPreShards
std::string getPreShards() override
Get shard indexes being imported.
Definition: DatabaseShardImp.cpp:319
ripple::NodeStore::DatabaseShardImp::getWriteLoad
std::int32_t getWriteLoad() const override
Retrieve the estimated number of pending write operations.
Definition: DatabaseShardImp.cpp:844
ripple::NodeStore::DatabaseShardImp::maxFileSz_
std::uint64_t maxFileSz_
Definition: DatabaseShardImp.h:226
ripple::NodeStore::TaskQueue
Definition: TaskQueue.h:32
ripple::OperatingMode::DISCONNECTED
@ DISCONNECTED
not ready to process requests
ripple::Stoppable
Provides an interface for starting and stopping.
Definition: Stoppable.h:200
ripple::NodeStore::DatabaseShardImp::ShardInfo::shard
std::shared_ptr< Shard > shard
Definition: DatabaseShardImp.h:192
ripple::rand_int
std::enable_if_t< std::is_integral< Integral >::value &&detail::is_engine< Engine >::value, Integral > rand_int(Engine &engine, Integral min, Integral max)
Return a uniformly distributed random integer.
Definition: ripple/basics/random.h:115
ripple::NodeStore::DatabaseShardImp::initConfig
bool initConfig(std::lock_guard< std::mutex > &)
Definition: DatabaseShardImp.cpp:1036
ripple::NodeStore::DatabaseShardImp::init_
bool init_
Definition: DatabaseShardImp.h:199
ripple::NodeStore::DatabaseShardImp::getCompleteShards
std::string getCompleteShards() override
Query which complete shards are stored.
Definition: DatabaseShardImp.cpp:570
ripple::Application::getLedgerMaster
virtual LedgerMaster & getLedgerMaster()=0
ripple::PublicKey
A public key.
Definition: PublicKey.h:59
ripple::Config
Definition: Config.h:67
ripple::NodeStore::DatabaseShardImp::shards_
std::map< std::uint32_t, ShardInfo > shards_
Definition: DatabaseShardImp.h:208
std::ofstream
STL class.
ripple::NodeStore::Database::fetch
virtual std::shared_ptr< NodeObject > fetch(uint256 const &hash, std::uint32_t seq)=0
Fetch an object.
ripple::NodeStore::DatabaseShardImp::~DatabaseShardImp
~DatabaseShardImp() override
Definition: DatabaseShardImp.cpp:60
ripple::Application::config
virtual Config & config()=0
ripple::NodeStore::DatabaseShardImp::dir_
boost::filesystem::path dir_
Definition: DatabaseShardImp.h:214
ripple::NodeStore::DatabaseShardImp::updateStatus
void updateStatus(std::lock_guard< std::mutex > &)
Definition: DatabaseShardImp.cpp:1314
ripple::Config::standalone
bool standalone() const
Definition: Config.h:222
ripple::NodeStore::seqToShardIndex
constexpr std::uint32_t seqToShardIndex(std::uint32_t seq, std::uint32_t ledgersPerShard=DatabaseShard::ledgersPerShardDefault)
Definition: DatabaseShard.h:190
ripple::Application::nodeIdentity
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
ripple::NodeStore::DatabaseShard
A collection of historical shards.
Definition: DatabaseShard.h:37
std::to_string
T to_string(T... args)
ripple::NodeStore::DatabaseShardImp::importShard
bool importShard(std::uint32_t shardIndex, boost::filesystem::path const &srcDir) override
Import a shard into the shard database.
Definition: DatabaseShardImp.cpp:338
ripple::NodeStore::Database::storeStats
void storeStats(size_t sz)
Definition: Database.h:250
beast::Journal::error
Stream error() const
Definition: Journal.h:333
ripple::ConfigSection
Definition: ConfigSections.h:28
beast::Journal
A generic endpoint for log messages.
Definition: Journal.h:58
std::uint32_t
ripple::NodeStore::Database::earliestLedgerSeq
std::uint32_t earliestLedgerSeq() const
Definition: Database.h:236
ripple::NodeStore::DatabaseShardImp::setFileStats
void setFileStats()
Definition: DatabaseShardImp.cpp:1267
ripple::NodeStore::DatabaseShardImp::acquireIndex_
std::uint32_t acquireIndex_
Definition: DatabaseShardImp.h:211
ripple::Overlay::foreach
void foreach(Function f) const
Visit every active peer.
Definition: Overlay.h:167
ripple::NodeStore::Scheduler
Scheduling for asynchronous backend activity.
Definition: ripple/nodestore/Scheduler.h:57
ripple::NodeStore::DatabaseShardImp::findAcquireIndex
boost::optional< std::uint32_t > findAcquireIndex(std::uint32_t validLedgerSeq, std::lock_guard< std::mutex > &)
Definition: DatabaseShardImp.cpp:1136
ripple::NodeStore::Database::fetchInternal
std::shared_ptr< NodeObject > fetchInternal(uint256 const &hash, std::shared_ptr< Backend > backend)
Definition: Database.cpp:123
ripple::NodeStore::DatabaseShardImp::init
bool init() override
Initialize the database.
Definition: DatabaseShardImp.cpp:66
ripple::Serializer
Definition: Serializer.h:39
ripple::NodeStore::DatabaseShardImp::store
void store(NodeObjectType type, Blob &&data, uint256 const &hash, std::uint32_t seq) override
Store the object.
Definition: DatabaseShardImp.cpp:861
ripple::NodeStore::DatabaseShardImp::parent_
Stoppable & parent_
Definition: DatabaseShardImp.h:197
ripple::NodeStore::DatabaseShardImp::importMarker_
static constexpr auto importMarker_
Definition: DatabaseShardImp.h:243
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::Serializer::addBitString
int addBitString(base_uint< Bits, Tag > const &v)
Definition: Serializer.h:97
ripple::NodeStore::Database::storeLedger
virtual bool storeLedger(std::shared_ptr< Ledger const > const &srcLedger)=0
Copies a ledger stored in a different database to this one.
ripple::Application::getNodeStore
virtual NodeStore::Database & getNodeStore()=0
ripple::NodeStore::DatabaseShardImp::import
void import(Database &source) override
Import the application local node store.
Definition: DatabaseShardImp.cpp:626
ripple::Application::getShardFamily
virtual Family * getShardFamily()=0
ripple::NodeStore::Database::j_
const beast::Journal j_
Definition: Database.h:242
ripple::NodeStore::DatabaseShardImp::fetchLedger
std::shared_ptr< Ledger > fetchLedger(uint256 const &hash, std::uint32_t seq) override
Fetch a ledger from the shard store.
Definition: DatabaseShardImp.cpp:432
ripple::NodeStore::Database::stopThreads
void stopThreads()
Definition: Database.cpp:93
std
STL namespace.
ripple::NodeStore::Database::asyncFetch
virtual bool asyncFetch(uint256 const &hash, std::uint32_t seq, std::shared_ptr< NodeObject > &object)=0
Fetch an object without waiting.
ripple::LedgerMaster::getCurrentLedgerIndex
LedgerIndex getCurrentLedgerIndex()
Definition: LedgerMaster.cpp:206
ripple::Application::overlay
virtual Overlay & overlay()=0
std::vector::empty
T empty(T... args)
ripple::NodeStore::DatabaseShardImp::validate
void validate() override
Verifies shard store data is valid.
Definition: DatabaseShardImp.cpp:579
beast::Journal::debug
Stream debug() const
Definition: Journal.h:315
ripple::getHashesByIndex
bool getHashesByIndex(std::uint32_t ledgerIndex, uint256 &ledgerHash, uint256 &parentHash, Application &app)
Definition: Ledger.cpp:1287
ripple::NodeStore::DatabaseShardImp::canAdd_
bool canAdd_
Definition: DatabaseShardImp.h:217
std::make_pair
T make_pair(T... args)
ripple::Serializer::add32
int add32(std::uint32_t i)
Definition: Serializer.cpp:38
ripple::NodeStore::Database::scheduler_
Scheduler & scheduler_
Definition: Database.h:243
ripple::RangeSet
boost::icl::interval_set< T, std::less, ClosedInterval< T > > RangeSet
A set of closed intervals over the domain T.
Definition: RangeSet.h:69
std::max
T max(T... args)
ripple::NodeStore::DatabaseShardImp::DatabaseShardImp
DatabaseShardImp()=delete
std::unique_ptr
STL class.
ripple::NodeStore::DatabaseShardImp::ShardInfo::State::import
@ import
ripple::NodeStore::asyncDivider
@ asyncDivider
Definition: nodestore/impl/Tuning.h:32
ripple::ConfigSection::nodeDatabase
static std::string nodeDatabase()
Definition: ConfigSections.h:33
ripple::NodeStore::DatabaseShardImp::ShardInfo
Definition: DatabaseShardImp.h:176
std::exception::what
T what(T... args)
ripple::NodeStore::DatabaseShardImp::onStop
void onStop() override
Override called when the stop notification is issued.
Definition: DatabaseShardImp.cpp:607
ripple::NodeStore::DatabaseShardImp::ctx_
std::unique_ptr< nudb::context > ctx_
Definition: DatabaseShardImp.h:202
ripple::BasicConfig::section
Section & section(std::string const &name)
Returns the section with the given name.
Definition: BasicConfig.cpp:138
ripple::Stoppable::isStopping
bool isStopping() const
Returns true if the stoppable should stop.
Definition: Stoppable.cpp:54
ripple::NodeStore::DatabaseShardImp::storeLedgerInShard
bool storeLedgerInShard(std::shared_ptr< Shard > &shard, std::shared_ptr< Ledger const > const &ledger)
Definition: DatabaseShardImp.cpp:1369
ripple::NodeStore::DatabaseShardImp::backendName_
std::string backendName_
Definition: DatabaseShardImp.h:223