rippled
DatabaseShardImp.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012, 2017 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #include <ripple/app/ledger/InboundLedgers.h>
21 #include <ripple/app/ledger/LedgerMaster.h>
22 #include <ripple/app/misc/NetworkOPs.h>
23 #include <ripple/basics/ByteUtilities.h>
24 #include <ripple/basics/chrono.h>
25 #include <ripple/basics/random.h>
26 #include <ripple/core/ConfigSections.h>
27 #include <ripple/nodestore/DummyScheduler.h>
28 #include <ripple/nodestore/impl/DatabaseShardImp.h>
29 #include <ripple/overlay/Overlay.h>
30 #include <ripple/overlay/predicates.h>
31 #include <ripple/protocol/HashPrefix.h>
32 
33 #include <boost/algorithm/string/predicate.hpp>
34 
35 namespace ripple {
36 namespace NodeStore {
37 
// Constructor (the signature line is missing from this listing — presumably
// DatabaseShardImp::DatabaseShardImp(...) with a trailing beast::Journal j).
// Forwards the shard-database config section to the DatabaseShard base and
// caches derived values: the earliest shard index and an initial estimate of
// the average shard file size (192 KB per ledger in the shard).
39  Application& app,
40  Stoppable& parent,
41  std::string const& name,
42  Scheduler& scheduler,
43  int readThreads,
45  : DatabaseShard(
46  name,
47  parent,
48  scheduler,
49  readThreads,
50  app.config().section(ConfigSection::shardDatabase()),
51  j)
52  , app_(app)
53  , parent_(parent)
54  , taskQueue_(std::make_unique<TaskQueue>(*this))
55  , earliestShardIndex_(seqToShardIndex(earliestLedgerSeq()))
// Initial estimate; recomputed from actual files in setFileStats().
56  , avgShardFileSz_(ledgersPerShard_ * kilobytes(192))
57 {
58 }
59 
// Destructor (declarator line missing from this listing): delegates all
// cleanup to onStop(), which stops read threads and notifies shards to stop.
61 {
62  onStop();
63 }
64 
// One-time initialization (name line missing from listing — presumably
// DatabaseShardImp::init()). Validates configuration, creates the shard
// directory if absent, then scans it for numeric shard subdirectories and
// classifies each: final, backend-complete (queued for finalization via
// finalizeShard), or the single shard still being acquired. Returns false
// on configuration/layout errors; true otherwise.
65 bool
67 {
68  {
69  std::lock_guard lock(mutex_);
70  if (init_)
71  {
72  JLOG(j_.error()) << "already initialized";
73  return false;
74  }
75 
76  if (!initConfig(lock))
77  {
78  JLOG(j_.error()) << "invalid configuration file settings";
79  return false;
80  }
81 
82  try
83  {
84  using namespace boost::filesystem;
85  if (exists(dir_))
86  {
87  if (!is_directory(dir_))
88  {
89  JLOG(j_.error()) << "'path' must be a directory";
90  return false;
91  }
92  }
93  else
94  create_directories(dir_);
95 
// NuDB context shared by all shard backends opened below.
96  ctx_ = std::make_unique<nudb::context>();
97  ctx_->start();
98 
99  // Find shards
100  for (auto const& d : directory_iterator(dir_))
101  {
102  if (!is_directory(d))
103  continue;
104 
105  // Check shard directory name is numeric
106  auto dirName = d.path().stem().string();
107  if (!std::all_of(dirName.begin(), dirName.end(), [](auto c) {
108  return ::isdigit(static_cast<unsigned char>(c));
109  }))
110  {
111  continue;
112  }
113 
114  auto const shardIndex{std::stoul(dirName)};
115  if (shardIndex < earliestShardIndex())
116  {
117  JLOG(j_.error()) << "shard " << shardIndex
118  << " comes before earliest shard index "
119  << earliestShardIndex();
120  return false;
121  }
122 
123  auto const shardDir{dir_ / std::to_string(shardIndex)};
124 
125  // Check if a previous import failed
126  if (is_regular_file(shardDir / importMarker_))
127  {
128  JLOG(j_.warn()) << "shard " << shardIndex
129  << " previously failed import, removing";
130  remove_all(shardDir);
131  continue;
132  }
133 
134  auto shard{
135  std::make_unique<Shard>(app_, *this, shardIndex, j_)};
136  if (!shard->open(scheduler_, *ctx_))
137  {
// A legacy-format shard is discarded; any other open failure aborts init.
138  if (!shard->isLegacy())
139  return false;
140 
141  // Remove legacy shard
142  shard->removeOnDestroy();
143  JLOG(j_.warn())
144  << "shard " << shardIndex << " removed, legacy shard";
145  continue;
146  }
147 
148  if (shard->isFinal())
149  {
150  shards_.emplace(
151  shardIndex,
152  ShardInfo(std::move(shard), ShardInfo::State::final));
153  }
154  else if (shard->isBackendComplete())
155  {
// Backend complete but not finalized: queue async finalization now.
156  auto const result{shards_.emplace(
157  shardIndex,
158  ShardInfo(std::move(shard), ShardInfo::State::none))};
159  finalizeShard(result.first->second, true, lock);
160  }
161  else
162  {
// Only one shard may be in the acquire state at a time.
163  if (acquireIndex_ != 0)
164  {
165  JLOG(j_.error())
166  << "more than one shard being acquired";
167  return false;
168  }
169 
170  shards_.emplace(
171  shardIndex,
172  ShardInfo(std::move(shard), ShardInfo::State::acquire));
173  acquireIndex_ = shardIndex;
174  }
175  }
176  }
177  catch (std::exception const& e)
178  {
179  JLOG(j_.error())
180  << "exception " << e.what() << " in function " << __func__;
181  }
182 
// NOTE(review): an exception during the directory scan is only logged;
// init_ is still set and init() returns true in that case.
183  updateStatus(lock);
185  init_ = true;
186  }
187 
188  setFileStats();
189  return true;
190 }
191 
// Prepare to acquire a ledger (signature line missing from listing; the
// Doxygen footer identifies this as prepareLedger(validLedgerSeq)).
// If a shard is already being acquired, returns the next sequence that
// shard needs. Otherwise checks storage headroom, picks a random missing
// shard index via findAcquireIndex, opens a new Shard for it and returns
// the first sequence to fetch; boost::none if nothing can be acquired.
192 boost::optional<std::uint32_t>
194 {
195  boost::optional<std::uint32_t> shardIndex;
196 
197  {
198  std::lock_guard lock(mutex_);
199  assert(init_);
200 
201  if (acquireIndex_ != 0)
202  {
203  if (auto it{shards_.find(acquireIndex_)}; it != shards_.end())
204  return it->second.shard->prepare();
// acquireIndex_ set but no matching entry: internal invariant violated.
205  assert(false);
206  return boost::none;
207  }
208 
209  if (!canAdd_)
210  return boost::none;
211 
212  // Check available storage space
// (Condition line 213 is missing from this listing — presumably a
// maximum-storage-size check, given the log message below.)
214  {
215  JLOG(j_.debug()) << "maximum storage size reached";
216  canAdd_ = false;
217  return boost::none;
218  }
219  if (avgShardFileSz_ > available())
220  {
221  JLOG(j_.error()) << "insufficient storage space available";
222  canAdd_ = false;
223  return boost::none;
224  }
225 
226  shardIndex = findAcquireIndex(validLedgerSeq, lock);
227  }
228 
229  if (!shardIndex)
230  {
231  JLOG(j_.debug()) << "no new shards to add";
232  {
233  std::lock_guard lock(mutex_);
234  canAdd_ = false;
235  }
236  return boost::none;
237  }
238 
// Open the shard outside the lock, then register it as the acquire shard.
239  auto shard{std::make_unique<Shard>(app_, *this, *shardIndex, j_)};
240  if (!shard->open(scheduler_, *ctx_))
241  return boost::none;
242 
243  auto const seq{shard->prepare()};
244  {
245  std::lock_guard lock(mutex_);
246  shards_.emplace(
247  *shardIndex,
248  ShardInfo(std::move(shard), ShardInfo::State::acquire));
249  acquireIndex_ = *shardIndex;
250  }
251  return seq;
252 }
253 
// Reserve a shard index for a later importShard() call (signature line
// missing from listing — presumably prepareShard(shardIndex)). Rejects
// indexes before the earliest shard, indexes at/after the shard containing
// the current validated or published ledger, duplicates, and requests that
// would exceed storage limits. On success records an import placeholder.
254 bool
256 {
257  auto fail = [j = j_, shardIndex](std::string const& msg) {
258  JLOG(j.error()) << "shard " << shardIndex << " " << msg;
259  return false;
260  };
261  std::lock_guard lock(mutex_);
262  assert(init_);
263 
264  if (!canAdd_)
265  return fail("cannot be stored at this time");
266 
267  if (shardIndex < earliestShardIndex())
268  {
269  return fail(
270  "comes before earliest shard index " +
// (Continuation line 271 missing from listing — presumably
// std::to_string(earliestShardIndex())); .)
272  }
273 
274  // If we are synced to the network, check if the shard index
275  // is greater or equal to the current shard.
276  auto seqCheck = [&](std::uint32_t seq) {
277  // seq will be greater than zero if valid
278  if (seq > earliestLedgerSeq() && shardIndex >= seqToShardIndex(seq))
279  return fail("has an invalid index");
280  return true;
281  };
282  if (!seqCheck(app_.getLedgerMaster().getValidLedgerIndex()) ||
// (Second operand on line 283 missing from listing — presumably another
// seqCheck on the published ledger index.)
284  {
285  return false;
286  }
287 
288  if (shards_.find(shardIndex) != shards_.end())
289  {
290  JLOG(j_.debug()) << "shard " << shardIndex
291  << " is already stored or queued for import";
292  return false;
293  }
294 
295  // Check available storage space
// (Condition line 296 missing from listing — presumably a
// maximum-storage-size check, matching the fail message below.)
297  return fail("maximum storage size reached");
298  if (avgShardFileSz_ > available())
299  return fail("insufficient storage space available");
300 
// Placeholder entry: no Shard object yet; importShard() fills it in.
301  shards_.emplace(shardIndex, ShardInfo(nullptr, ShardInfo::State::import));
302  return true;
303 }
304 
// Remove a shard index previously reserved by prepareShard (signature line
// missing from listing). No-op unless an entry exists and is still in the
// import state — entries with an actual shard attached are left alone.
305 void
307 {
308  std::lock_guard lock(mutex_);
309  assert(init_);
310 
311  if (auto const it{shards_.find(shardIndex)};
312  it != shards_.end() && it->second.state == ShardInfo::State::import)
313  {
314  shards_.erase(it);
315  }
316 }
317 
// Return a textual list of shard indexes reserved for import (return type
// and signature lines missing from listing). Collects indexes whose state
// is import into a range set and renders it; empty string when none.
320 {
// (Line 321 missing from listing — presumably the RangeSet declaration
// that `rs` below refers to.)
322  {
323  std::lock_guard lock(mutex_);
324  assert(init_);
325 
326  for (auto const& e : shards_)
327  if (e.second.state == ShardInfo::State::import)
328  rs.insert(e.first);
329  }
330 
331  if (rs.empty())
332  return {};
333 
334  return to_string(rs);
335 };
336 
// Import a complete shard from an external directory (function name line
// missing from listing — presumably importShard). Validates the source,
// checks the index was reserved via prepareShard, renames the directory
// into the shard store, opens and verifies the shard, then queues
// finalization. On verification failure the directory is renamed back.
337 bool
339  std::uint32_t shardIndex,
340  boost::filesystem::path const& srcDir)
341 {
342  using namespace boost::filesystem;
343  try
344  {
345  if (!is_directory(srcDir) || is_empty(srcDir))
346  {
347  JLOG(j_.error()) << "invalid source directory " << srcDir.string();
348  return false;
349  }
350  }
351  catch (std::exception const& e)
352  {
353  JLOG(j_.error()) << "exception " << e.what() << " in function "
354  << __func__;
355  return false;
356  }
357 
// Helper used both for the forward rename and the rollback on failure.
358  auto renameDir = [&](path const& src, path const& dst) {
359  try
360  {
361  rename(src, dst);
362  }
363  catch (std::exception const& e)
364  {
365  JLOG(j_.error())
366  << "exception " << e.what() << " in function " << __func__;
367  return false;
368  }
369  return true;
370  };
371 
372  path dstDir;
373  {
374  std::lock_guard lock(mutex_);
375  assert(init_);
376 
377  // Check shard is prepared
378  if (auto const it{shards_.find(shardIndex)}; it == shards_.end() ||
379  it->second.shard || it->second.state != ShardInfo::State::import)
380  {
381  JLOG(j_.error()) << "shard " << shardIndex << " failed to import";
382  return false;
383  }
384 
385  dstDir = dir_ / std::to_string(shardIndex);
386  }
387 
388  // Rename source directory to the shard database directory
389  if (!renameDir(srcDir, dstDir))
390  return false;
391 
392  // Create the new shard
393  auto shard{std::make_unique<Shard>(app_, *this, shardIndex, j_)};
394  if (!shard->open(scheduler_, *ctx_) || !shard->isBackendComplete())
395  {
396  JLOG(j_.error()) << "shard " << shardIndex << " failed to import";
// Release backend files before renaming the directory back.
397  shard.reset();
398  renameDir(dstDir, srcDir);
399  return false;
400  }
401 
// Re-check under lock: the reservation may have changed while unlocked.
402  std::lock_guard lock(mutex_);
403  auto const it{shards_.find(shardIndex)};
404  if (it == shards_.end() || it->second.shard ||
405  it->second.state != ShardInfo::State::import)
406  {
407  JLOG(j_.error()) << "shard " << shardIndex << " failed to import";
408  return false;
409  }
410 
411  it->second.shard = std::move(shard);
412  finalizeShard(it->second, true, lock);
413  return true;
414 }
415 
// Fetch and reassemble a complete ledger by hash/seq (return type and
// signature lines missing from listing). Only serves ledgers held in a
// final shard or in the shard being acquired (per the comment below);
// rebuilds the Ledger from the stored header and verifies sequence, hash,
// and the state/transaction map roots. Returns nullptr on any mismatch.
418 {
419  auto const shardIndex{seqToShardIndex(seq)};
420  {
// Copy ShardInfo out so the state check runs outside the lock.
421  ShardInfo shardInfo;
422  {
423  std::lock_guard lock(mutex_);
424  assert(init_);
425 
426  if (auto const it{shards_.find(shardIndex)}; it != shards_.end())
427  shardInfo = it->second;
428  else
429  return {};
430  }
431 
432  // Check if the ledger is stored in a final shard
433  // or in the shard being acquired
434  switch (shardInfo.state)
435  {
// (Case labels on lines 436 and 438 missing from listing — presumably
// State::final and State::acquire, per the comment above.)
437  break;
439  if (shardInfo.shard->containsLedger(seq))
440  break;
441  [[fallthrough]];
442  default:
443  return {};
444  }
445  }
446 
447  auto nObj{fetch(hash, seq)};
448  if (!nObj)
449  return {};
450 
451  auto fail = [this, seq](std::string const& msg) -> std::shared_ptr<Ledger> {
452  JLOG(j_.error()) << "shard " << seqToShardIndex(seq) << " " << msg;
453  return {};
454  };
455 
// Rebuild the ledger object from the serialized header blob.
456  auto ledger{std::make_shared<Ledger>(
457  InboundLedger::deserializeHeader(makeSlice(nObj->getData()), true),
458  app_.config(),
459  *app_.shardFamily())};
460 
461  if (ledger->info().seq != seq)
462  {
463  return fail(
464  "encountered invalid ledger sequence " + std::to_string(seq));
465  }
466  if (ledger->info().hash != hash)
467  {
468  return fail(
469  "encountered invalid ledger hash " + to_string(hash) +
470  " on sequence " + std::to_string(seq));
471  }
472 
473  ledger->setFull();
474  if (!ledger->stateMap().fetchRoot(
475  SHAMapHash{ledger->info().accountHash}, nullptr))
476  {
477  return fail(
478  "is missing root STATE node on hash " + to_string(hash) +
479  " on sequence " + std::to_string(seq));
480  }
481 
482  if (ledger->info().txHash.isNonZero())
483  {
484  if (!ledger->txMap().fetchRoot(
485  SHAMapHash{ledger->info().txHash}, nullptr))
486  {
487  return fail(
488  "is missing root TXN node on hash " + to_string(hash) +
489  " on sequence " + std::to_string(seq));
490  }
491  }
492  return ledger;
493 }
494 
// Record that a fully-assembled ledger is available locally (signature line
// missing from listing — presumably setStored(ledger)). Validates the
// ledger's hashes and SHAMaps, then forwards it to the shard currently
// being acquired via storeLedgerInShard. Silently returns (with a log)
// if the ledger's shard is not the one being acquired.
495 void
497 {
498  if (ledger->info().hash.isZero())
499  {
500  JLOG(j_.error()) << "zero ledger hash for ledger sequence "
501  << ledger->info().seq;
502  return;
503  }
504  if (ledger->info().accountHash.isZero())
505  {
506  JLOG(j_.error()) << "zero account hash for ledger sequence "
507  << ledger->info().seq;
508  return;
509  }
510  if (ledger->stateMap().getHash().isNonZero() &&
511  !ledger->stateMap().isValid())
512  {
513  JLOG(j_.error()) << "invalid state map for ledger sequence "
514  << ledger->info().seq;
515  return;
516  }
517  if (ledger->info().txHash.isNonZero() && !ledger->txMap().isValid())
518  {
519  JLOG(j_.error()) << "invalid transaction map for ledger sequence "
520  << ledger->info().seq;
521  return;
522  }
523 
524  auto const shardIndex{seqToShardIndex(ledger->info().seq)};
// (Line 525 missing from listing — presumably the std::shared_ptr<Shard>
// declaration that `shard` below refers to.)
526  {
527  std::lock_guard lock(mutex_);
528  assert(init_);
529 
530  if (shardIndex != acquireIndex_)
531  {
532  JLOG(j_.trace())
533  << "shard " << shardIndex << " is not being acquired";
534  return;
535  }
536 
537  if (auto const it{shards_.find(shardIndex)}; it != shards_.end())
538  shard = it->second.shard;
539  else
540  {
541  JLOG(j_.error())
542  << "shard " << shardIndex << " is not being acquired";
543  return;
544  }
545  }
546 
// Store outside the lock; helper handles failure/finalization bookkeeping.
547  storeLedgerInShard(shard, ledger);
548 }
549 
// Return the cached status string of finalized shards (return type and
// signature lines missing from listing; status_ is maintained by
// updateStatus under the same mutex).
552 {
553  std::lock_guard lock(mutex_);
554  assert(init_);
555 
556  return status_;
557 }
558 
// Re-validate every finalized shard (signature line missing from listing).
// Collects weak references under the lock, then re-runs finalize(true) on
// each shard outside the lock, and finally resets the shard family caches.
559 void
561 {
// (Line 562 missing from listing — presumably the declaration of the
// `shards` weak-pointer container used below.)
563  {
564  std::lock_guard lock(mutex_);
565  assert(init_);
566 
567  // Only shards with a state of final should be validated
568  for (auto& e : shards_)
569  if (e.second.state == ShardInfo::State::final)
570  shards.push_back(e.second.shard);
571 
572  if (shards.empty())
573  return;
574 
575  JLOG(j_.debug()) << "Validating shards " << status_;
576  }
577 
// Work outside the lock; weak_ptr guards against concurrent removal.
578  for (auto const& e : shards)
579  {
580  if (auto shard{e.lock()}; shard)
581  shard->finalize(true);
582  }
583 
584  app_.shardFamily()->reset();
585 }
586 
// Stoppable shutdown hook (signature line missing from listing — presumably
// onStop()). Stops the base-class read threads first, then tells every
// shard to stop and drops all shard references.
587 void
589 {
590  // Stop read threads in base before data members are destroyed
591  stopThreads();
592 
593  std::lock_guard lock(mutex_);
594  if (shards_.empty())
595  return;
596 
597  // Notify shards to stop
598  for (auto const& e : shards_)
599  if (e.second.shard)
600  e.second.shard->stop();
601  shards_.clear();
602 }
603 
// Import complete shards from the application's local node store (signature
// line missing from listing — presumably import(Database& source)).
// Determines the range of complete shards covered by the SQLite ledger DB,
// then for each missing shard: verifies sampled ledgers exist in the node
// store, copies all ledgers into a freshly created shard (with an import
// marker file for crash recovery), writes the shard's final key, and queues
// finalization. Storage limits abort the whole loop; per-shard failures
// remove only that shard and continue.
604 void
606 {
607  {
608  std::lock_guard lock(mutex_);
609  assert(init_);
610 
611  // Only the application local node store can be imported
612  if (&source != &app_.getNodeStore())
613  {
614  assert(false);
615  JLOG(j_.error()) << "invalid source database";
616  return;
617  }
618 
619  std::uint32_t earliestIndex;
620  std::uint32_t latestIndex;
621  {
// Load the first (ascending) or last (descending) ledger sequence stored
// in the SQLite ledger database.
622  auto loadLedger = [&](bool ascendSort =
623  true) -> boost::optional<std::uint32_t> {
// (Line 624 missing from listing — presumably the shared_ptr<Ledger>
// declaration that `ledger` below refers to.)
625  std::uint32_t seq;
626  std::tie(ledger, seq, std::ignore) = loadLedgerHelper(
627  "WHERE LedgerSeq >= " +
// (Line 628 missing from listing — presumably
// std::to_string(earliestLedgerSeq()) + .)
629  " order by LedgerSeq " + (ascendSort ? "asc" : "desc") +
630  " limit 1",
631  app_,
632  false);
633  if (!ledger || seq == 0)
634  {
635  JLOG(j_.error()) << "no suitable ledgers were found in"
636  " the SQLite database to import";
637  return boost::none;
638  }
639  return seq;
640  };
641 
642  // Find earliest ledger sequence stored
643  auto seq{loadLedger()};
644  if (!seq)
645  return;
646  earliestIndex = seqToShardIndex(*seq);
647 
648  // Consider only complete shards
649  if (seq != firstLedgerSeq(earliestIndex))
650  ++earliestIndex;
651 
652  // Find last ledger sequence stored
653  seq = loadLedger(false);
654  if (!seq)
655  return;
656  latestIndex = seqToShardIndex(*seq);
657 
658  // Consider only complete shards
659  if (seq != lastLedgerSeq(latestIndex))
660  --latestIndex;
661 
662  if (latestIndex < earliestIndex)
663  {
664  JLOG(j_.error()) << "no suitable ledgers were found in"
665  " the SQLite database to import";
666  return;
667  }
668  }
669 
670  // Import the shards
671  for (std::uint32_t shardIndex = earliestIndex;
672  shardIndex <= latestIndex;
673  ++shardIndex)
674  {
// (Condition line 675 missing from listing — presumably a
// maximum-storage-size check, given the log message below.)
676  {
677  JLOG(j_.error()) << "maximum storage size reached";
678  canAdd_ = false;
679  break;
680  }
681  if (avgShardFileSz_ > available())
682  {
683  JLOG(j_.error()) << "insufficient storage space available";
684  canAdd_ = false;
685  break;
686  }
687 
688  // Skip if already stored
689  if (shardIndex == acquireIndex_ ||
690  shards_.find(shardIndex) != shards_.end())
691  {
692  JLOG(j_.debug()) << "shard " << shardIndex << " already exists";
693  continue;
694  }
695 
696  // Verify SQLite ledgers are in the node store
697  {
698  auto const firstSeq{firstLedgerSeq(shardIndex)};
699  auto const lastSeq{
700  std::max(firstSeq, lastLedgerSeq(shardIndex))};
701  auto const numLedgers{
702  shardIndex == earliestShardIndex() ? lastSeq - firstSeq + 1
703  : ledgersPerShard_};
704  auto ledgerHashes{getHashesByIndex(firstSeq, lastSeq, app_)};
705  if (ledgerHashes.size() != numLedgers)
706  continue;
707 
708  bool valid{true};
// Spot-check: sample every 256th ledger rather than all of them.
709  for (std::uint32_t n = firstSeq; n <= lastSeq; n += 256)
710  {
711  if (!source.fetch(ledgerHashes[n].first, n))
712  {
713  JLOG(j_.warn()) << "SQLite ledger sequence " << n
714  << " mismatches node store";
715  valid = false;
716  break;
717  }
718  }
719  if (!valid)
720  continue;
721  }
722 
723  // Create the new shard
724  app_.shardFamily()->reset();
725  auto shard{std::make_unique<Shard>(app_, *this, shardIndex, j_)};
726  if (!shard->open(scheduler_, *ctx_))
727  continue;
728 
729  // Create a marker file to signify an import in progress
730  auto const shardDir{dir_ / std::to_string(shardIndex)};
731  auto const markerFile{shardDir / importMarker_};
732  {
733  std::ofstream ofs{markerFile.string()};
734  if (!ofs.is_open())
735  {
736  JLOG(j_.error()) << "shard " << shardIndex
737  << " failed to create temp marker file";
738  shard->removeOnDestroy();
739  continue;
740  }
741  ofs.close();
742  }
743 
744  // Copy the ledgers from node store
745  std::shared_ptr<Ledger> recentStored;
746  boost::optional<uint256> lastLedgerHash;
747 
748  while (auto seq = shard->prepare())
749  {
750  auto ledger{loadByIndex(*seq, app_, false)};
751  if (!ledger || ledger->info().seq != seq)
752  break;
753 
// (Line 754 missing from listing — presumably the call whose arguments
// follow, e.g. Database::copyLedger or similar.)
755  *ledger,
756  shard->getBackend(),
757  nullptr,
758  nullptr,
759  recentStored))
760  {
761  break;
762  }
763 
764  if (!shard->store(ledger))
765  break;
766 
767  if (!lastLedgerHash && seq == lastLedgerSeq(shardIndex))
768  lastLedgerHash = ledger->info().hash;
769 
770  recentStored = ledger;
771  }
772 
773  using namespace boost::filesystem;
774  if (lastLedgerHash && shard->isBackendComplete())
775  {
776  // Store shard final key
777  Serializer s;
// (Line 778 missing from listing — presumably an s.add32 of a version
// constant preceding the sequence/hash fields below.)
779  s.add32(firstLedgerSeq(shardIndex));
780  s.add32(lastLedgerSeq(shardIndex));
781  s.add256(*lastLedgerHash);
782  auto nObj{NodeObject::createObject(
783  hotUNKNOWN, std::move(s.modData()), Shard::finalKey)};
784 
785  try
786  {
787  shard->getBackend()->store(nObj);
788 
789  // The import process is complete and the
790  // marker file is no longer required
791  remove_all(markerFile);
792 
793  JLOG(j_.debug()) << "shard " << shardIndex
794  << " was successfully imported";
795 
796  auto const result{shards_.emplace(
797  shardIndex,
798  ShardInfo(std::move(shard), ShardInfo::State::none))};
799  finalizeShard(result.first->second, true, lock);
800  }
801  catch (std::exception const& e)
802  {
803  JLOG(j_.error()) << "exception " << e.what()
804  << " in function " << __func__;
805  shard->removeOnDestroy();
806  }
807  }
808  else
809  {
810  JLOG(j_.error())
811  << "shard " << shardIndex << " failed to import";
812  shard->removeOnDestroy();
813  }
814  }
815 
816  updateStatus(lock);
817  }
818 
819  setFileStats();
820 }
821 
// Report the write load of the backend of the shard currently being
// acquired, or 0 when none (return type and signature lines missing from
// listing — the Doxygen footer context suggests getWriteLoad()).
824 {
// (Line 825 missing from listing — presumably the std::shared_ptr<Shard>
// declaration that `shard` below refers to.)
826  {
827  std::lock_guard lock(mutex_);
828  assert(init_);
829 
830  if (auto const it{shards_.find(acquireIndex_)}; it != shards_.end())
831  shard = it->second.shard;
832  else
833  return 0;
834  }
835 
836  return shard->getBackend()->getWriteLoad();
837 }
838 
// Store a single node object belonging to the shard being acquired
// (function name line missing from listing — presumably store(...)).
// Inserts into the shard's positive cache, persists via its backend,
// purges any negative-cache entry, and records size statistics. Objects
// for any other shard index are dropped with a trace log.
839 void
841  NodeObjectType type,
842  Blob&& data,
843  uint256 const& hash,
844  std::uint32_t seq)
845 {
846  auto const shardIndex{seqToShardIndex(seq)};
// (Line 847 missing from listing — presumably the std::shared_ptr<Shard>
// declaration that `shard` below refers to.)
848  {
849  std::lock_guard lock(mutex_);
850  assert(init_);
851 
852  if (shardIndex != acquireIndex_)
853  {
854  JLOG(j_.trace())
855  << "shard " << shardIndex << " is not being acquired";
856  return;
857  }
858 
859  if (auto const it{shards_.find(shardIndex)}; it != shards_.end())
860  shard = it->second.shard;
861  else
862  {
863  JLOG(j_.error())
864  << "shard " << shardIndex << " is not being acquired";
865  return;
866  }
867  }
868 
869  auto [backend, pCache, nCache] = shard->getBackendAll();
870  auto nObj{NodeObject::createObject(type, std::move(data), hash)};
871 
// Cache first, persist, then clear the negative cache for this hash.
872  pCache->canonicalize_replace_cache(hash, nObj);
873  backend->store(nObj);
874  nCache->erase(hash);
875 
876  storeStats(nObj->getData().size());
877 }
878 
// Fetch an object by hash through the caches of the shard covering seq
// (return type and signature lines missing from listing; the Doxygen
// footer identifies this as fetch(hash, seq)). Empty when no shard holds
// the sequence.
881 {
882  auto cache{getCache(seq)};
883  if (cache.first)
884  return doFetch(hash, seq, *cache.first, *cache.second, false);
885  return {};
886 }
887 
// Asynchronous fetch (function name line 889 missing from listing —
// presumably asyncFetch). Returns true when the object is already in the
// positive cache or known-missing via the negative cache; otherwise posts
// an asynchronous read and returns false.
888 bool
890  uint256 const& hash,
891  std::uint32_t seq,
// (Line 892 missing from listing — presumably the output parameter
// std::shared_ptr<NodeObject>& object.)
893 {
894  auto cache{getCache(seq)};
895  if (cache.first)
896  {
897  // See if the object is in cache
898  object = cache.first->fetch(hash);
899  if (object || cache.second->touch_if_exists(hash))
900  return true;
901  // Otherwise post a read
902  Database::asyncFetch(hash, seq, cache.first, cache.second);
903  }
904  return false;
905 }
906 
// Copy a ledger stored in another database into the shard being acquired
// (signature line missing from listing; the Doxygen footer identifies this
// as storeLedger(srcLedger)). Skips ledgers outside the acquire shard or
// already present, copies the ledger data into the shard's backend, then
// records it via storeLedgerInShard.
907 bool
909 {
910  auto const seq{srcLedger->info().seq};
911  auto const shardIndex{seqToShardIndex(seq)};
// (Line 912 missing from listing — presumably the std::shared_ptr<Shard>
// declaration that `shard` below refers to.)
913  {
914  std::lock_guard lock(mutex_);
915  assert(init_);
916 
917  if (shardIndex != acquireIndex_)
918  {
919  JLOG(j_.trace())
920  << "shard " << shardIndex << " is not being acquired";
921  return false;
922  }
923 
924  if (auto const it{shards_.find(shardIndex)}; it != shards_.end())
925  shard = it->second.shard;
926  else
927  {
928  JLOG(j_.error())
929  << "shard " << shardIndex << " is not being acquired";
930  return false;
931  }
932  }
933 
934  if (shard->containsLedger(seq))
935  {
936  JLOG(j_.trace()) << "shard " << shardIndex << " ledger already stored";
937  return false;
938  }
939 
940  {
941  auto [backend, pCache, nCache] = shard->getBackendAll();
// (Line 942 missing from listing — presumably the copy call whose
// arguments follow, e.g. Database::copyLedger(.)
943  *srcLedger, backend, pCache, nCache, nullptr))
944  {
945  return false;
946  }
947  }
948 
949  return storeLedgerInShard(shard, srcLedger);
950 }
951 
// Suggested number of outstanding asynchronous reads for the shard covering
// seq (signature line missing from listing). Only final or acquiring shards
// qualify; derived from the shard's positive-cache target size.
952 int
954 {
955  auto const shardIndex{seqToShardIndex(seq)};
// (Line 956 missing from listing — presumably the std::shared_ptr<Shard>
// declaration that `shard` below refers to.)
957  {
958  std::lock_guard lock(mutex_);
959  assert(init_);
960 
961  if (auto const it{shards_.find(shardIndex)}; it != shards_.end() &&
962  (it->second.state == ShardInfo::State::final ||
963  it->second.state == ShardInfo::State::acquire))
964  {
965  shard = it->second.shard;
966  }
967  else
968  return 0;
969  }
970 
971  return shard->pCache()->getTargetSize() / asyncDivider;
972 }
973 
// Positive-cache hit rate of the shard being acquired, or 0 when no shard
// is being acquired (signature line missing from listing).
974 float
976 {
// (Line 977 missing from listing — presumably the std::shared_ptr<Shard>
// declaration that `shard` below refers to.)
978  {
979  std::lock_guard lock(mutex_);
980  assert(init_);
981 
982  if (auto const it{shards_.find(acquireIndex_)}; it != shards_.end())
983  shard = it->second.shard;
984  else
985  return 0;
986  }
987 
988  return shard->pCache()->getHitRate();
989 }
990 
// Sweep (age out) the caches of every final and acquiring shard (signature
// line missing from listing). Collects weak references under the lock and
// sweeps outside it.
991 void
993 {
// (Line 994 missing from listing — presumably the declaration of the
// `shards` weak-pointer container used below.)
995  {
996  std::lock_guard lock(mutex_);
997  assert(init_);
998 
999  for (auto const& e : shards_)
1000  if (e.second.state == ShardInfo::State::final ||
1001  e.second.state == ShardInfo::State::acquire)
1002  {
1003  shards.push_back(e.second.shard);
1004  }
1005  }
1006 
1007  for (auto const& e : shards)
1008  {
1009  if (auto shard{e.lock()}; shard)
1010  shard->sweep();
1011  }
1012 }
1013 
// Parse and validate the [shard_db] configuration section (signature line
// missing from listing — presumably initConfig(lock)). Rules enforced:
// 'earliest_seq' must match the node_db value, 'path' is required,
// 'max_size_gb' must be >= 10 and not overflow when converted to bytes,
// 'ledgers_per_shard' is standalone-only and must be a multiple of 256,
// and the backend 'type' must be NuDB.
1014 bool
1016 {
1017  auto fail = [j = j_](std::string const& msg) {
1018  JLOG(j.error()) << "[" << ConfigSection::shardDatabase() << "] " << msg;
1019  return false;
1020  };
1021 
1022  Config const& config{app_.config()};
1023  Section const& section{config.section(ConfigSection::shardDatabase())};
1024 
1025  {
1026  // The earliest ledger sequence defaults to XRP_LEDGER_EARLIEST_SEQ.
1027  // A custom earliest ledger sequence can be set through the
1028  // configuration file using the 'earliest_seq' field under the
1029  // 'node_db' and 'shard_db' stanzas. If specified, this field must
1030  // have a value greater than zero and be equally assigned in
1031  // both stanzas.
1032 
1033  std::uint32_t shardDBEarliestSeq{0};
1034  get_if_exists<std::uint32_t>(
1035  section, "earliest_seq", shardDBEarliestSeq);
1036 
1037  std::uint32_t nodeDBEarliestSeq{0};
1038  get_if_exists<std::uint32_t>(
1039  config.section(ConfigSection::nodeDatabase()),
1040  "earliest_seq",
1041  nodeDBEarliestSeq);
1042 
1043  if (shardDBEarliestSeq != nodeDBEarliestSeq)
1044  {
1045  return fail(
1046  "and [" + ConfigSection::nodeDatabase() +
1047  "] define different 'earliest_seq' values");
1048  }
1049  }
1050 
1051  using namespace boost::filesystem;
1052  if (!get_if_exists<path>(section, "path", dir_))
1053  return fail("'path' missing");
1054 
1055  {
1056  std::uint64_t sz;
1057  if (!get_if_exists<std::uint64_t>(section, "max_size_gb", sz))
1058  return fail("'max_size_gb' missing");
1059 
// Detect overflow of the gigabyte-to-byte shift (sz << 30 wraps).
1060  if ((sz << 30) < sz)
1061  return fail("'max_size_gb' overflow");
1062 
1063  // Minimum storage space required (in gigabytes)
1064  if (sz < 10)
1065  return fail("'max_size_gb' must be at least 10");
1066 
1067  // Convert to bytes
1068  maxFileSz_ = sz << 30;
1069  }
1070 
1071  if (section.exists("ledgers_per_shard"))
1072  {
1073  // To be set only in standalone for testing
1074  if (!config.standalone())
1075  return fail("'ledgers_per_shard' only honored in stand alone");
1076 
1077  ledgersPerShard_ = get<std::uint32_t>(section, "ledgers_per_shard");
1078  if (ledgersPerShard_ == 0 || ledgersPerShard_ % 256 != 0)
1079  return fail("'ledgers_per_shard' must be a multiple of 256");
1080  }
1081 
1082  // NuDB is the default and only supported permanent storage backend
1083  backendName_ = get<std::string>(section, "type", "nudb");
1084  if (!boost::iequals(backendName_, "NuDB"))
1085  return fail("'type' value unsupported");
1086 
1087  return true;
1088 }
1089 
// Fetch an object directly from the backend of the shard containing seq,
// bypassing the shard's caches (return type and signature lines missing
// from listing — presumably fetchFrom(hash, seq)).
1092 {
1093  auto const shardIndex{seqToShardIndex(seq)};
1094  std::shared_ptr<Shard> shard;
1095  {
1096  std::lock_guard lock(mutex_);
1097  assert(init_);
1098 
1099  if (auto const it{shards_.find(shardIndex)};
1100  it != shards_.end() && it->second.shard)
1101  {
1102  shard = it->second.shard;
1103  }
1104  else
1105  return {};
1106  }
1107 
1108  return fetchInternal(hash, shard->getBackend());
1109 }
1110 
// Pick a shard index to acquire next (function name line 1112 missing from
// listing — presumably findAcquireIndex; the second parameter on missing
// line 1115 is presumably the held-lock guard). Considers only shards that
// are complete up to the validated ledger. For a small or mostly-full
// index space, enumerates the missing indexes and picks one uniformly at
// random; for a large sparse space, samples random indexes until a missing
// one is found.
1111 boost::optional<std::uint32_t>
1113  std::uint32_t validLedgerSeq,
1115 {
1116  if (validLedgerSeq < earliestLedgerSeq())
1117  return boost::none;
1118 
// Highest shard index that is fully covered by the validated ledger.
1119  auto const maxShardIndex{[this, validLedgerSeq]() {
1120  auto shardIndex{seqToShardIndex(validLedgerSeq)};
1121  if (validLedgerSeq != lastLedgerSeq(shardIndex))
1122  --shardIndex;
1123  return shardIndex;
1124  }()};
1125  auto const maxNumShards{maxShardIndex - earliestShardIndex() + 1};
1126 
1127  // Check if the shard store has all shards
1128  if (shards_.size() >= maxNumShards)
1129  return boost::none;
1130 
1131  if (maxShardIndex < 1024 ||
1132  static_cast<float>(shards_.size()) / maxNumShards > 0.5f)
1133  {
1134  // Small or mostly full index space to sample
1135  // Find the available indexes and select one at random
// (Line 1136 missing from listing — presumably the declaration of the
// `available` index vector used below.)
1137  available.reserve(maxNumShards - shards_.size());
1138 
1139  for (auto shardIndex = earliestShardIndex();
1140  shardIndex <= maxShardIndex;
1141  ++shardIndex)
1142  {
1143  if (shards_.find(shardIndex) == shards_.end())
1144  available.push_back(shardIndex);
1145  }
1146 
1147  if (available.empty())
1148  return boost::none;
1149 
1150  if (available.size() == 1)
1151  return available.front();
1152 
1153  return available[rand_int(
1154  0u, static_cast<std::uint32_t>(available.size() - 1))];
1155  }
1156 
1157  // Large, sparse index space to sample
1158  // Keep choosing indexes at random until an available one is found
1159  // chances of running more than 30 times is less than 1 in a billion
1160  for (int i = 0; i < 40; ++i)
1161  {
1162  auto const shardIndex{rand_int(earliestShardIndex(), maxShardIndex)};
1163  if (shards_.find(shardIndex) == shards_.end())
1164  return shardIndex;
1165  }
1166 
1167  assert(false);
1168  return boost::none;
1169 }
1170 
// Move a backend-complete shard toward the final state (function name line
// 1172 missing from listing — presumably finalizeShard; the third parameter
// on missing line 1175 is presumably the held-lock guard). Marks the entry
// as finalize and schedules a task that runs Shard::finalize: on failure
// the shard is erased and its files removed; on success the state becomes
// final, status is rebuilt, and the new shard index is advertised to peers.
1171 void
1173  ShardInfo& shardInfo,
1174  bool writeSQLite,
1176 {
1177  assert(shardInfo.shard);
1178  assert(shardInfo.shard->index() != acquireIndex_);
1179  assert(shardInfo.shard->isBackendComplete());
1180  assert(shardInfo.state != ShardInfo::State::finalize);
1181 
1182  auto const shardIndex{shardInfo.shard->index()};
1183 
1184  shardInfo.state = ShardInfo::State::finalize;
// Runs asynchronously on the task queue; re-resolves the shard by index
// each time since the map may change before the task executes.
1185  taskQueue_->addTask([this, shardIndex, writeSQLite]() {
1186  if (isStopping())
1187  return;
1188 
1189  std::shared_ptr<Shard> shard;
1190  {
1191  std::lock_guard lock(mutex_);
1192  if (auto const it{shards_.find(shardIndex)}; it != shards_.end())
1193  shard = it->second.shard;
1194  else
1195  {
1196  JLOG(j_.error()) << "Unable to finalize shard " << shardIndex;
1197  return;
1198  }
1199  }
1200 
1201  if (!shard->finalize(writeSQLite))
1202  {
1203  if (isStopping())
1204  return;
1205 
1206  // Invalid or corrupt shard, remove it
1207  {
1208  std::lock_guard lock(mutex_);
1209  shards_.erase(shardIndex);
1210  updateStatus(lock);
1211  }
1212 
1213  shard->removeOnDestroy();
1214  shard.reset();
1215  setFileStats();
1216  return;
1217  }
1218 
1219  if (isStopping())
1220  return;
1221 
1222  {
1223  std::lock_guard lock(mutex_);
1224  auto const it{shards_.find(shardIndex)};
1225  if (it == shards_.end())
1226  return;
1227  it->second.state = ShardInfo::State::final;
1228  updateStatus(lock);
1229  }
1230 
1231  setFileStats();
1232 
1233  // Update peers with new shard index
// (Second operand of the condition, line 1235, is missing from this
// listing — presumably an operating-mode / network check.)
1234  if (!app_.config().standalone() &&
1236  {
1237  protocol::TMPeerShardInfo message;
1238  PublicKey const& publicKey{app_.nodeIdentity().first};
1239  message.set_nodepubkey(publicKey.data(), publicKey.size());
1240  message.set_shardindexes(std::to_string(shardIndex));
1241  app_.overlay().foreach (send_always(std::make_shared<Message>(
1242  message, protocol::mtPEER_SHARD_INFO)));
1243  }
1244  });
1245 }
1246 
// Recompute aggregate file statistics across all shards (signature line
// 1248 missing from listing): total file size, required file descriptors,
// and the average shard file size. Disables adding new shards once the
// configured maximum size is reached; warns if the remaining budget
// exceeds the volume's free space.
1247 void
1249 {
// (Line 1250 missing from listing — presumably the declaration of the
// `shards` weak-pointer container used below.)
1251  {
1252  std::lock_guard lock(mutex_);
1253  assert(init_);
1254 
1255  if (shards_.empty())
1256  return;
1257 
1258  for (auto const& e : shards_)
1259  if (e.second.shard)
1260  shards.push_back(e.second.shard);
1261  }
1262 
// Gather file info outside the lock; weak_ptr guards against removal.
1263  std::uint64_t sumSz{0};
1264  std::uint32_t sumFd{0};
1265  std::uint32_t numShards{0};
1266  for (auto const& e : shards)
1267  {
1268  if (auto shard{e.lock()}; shard)
1269  {
1270  auto [sz, fd] = shard->fileInfo();
1271  sumSz += sz;
1272  sumFd += fd;
1273  ++numShards;
1274  }
1275  }
1276 
1277  std::lock_guard lock(mutex_);
1278  fileSz_ = sumSz;
1279  fdRequired_ = sumFd;
1280  avgShardFileSz_ = (numShards == 0 ? fileSz_ : fileSz_ / numShards);
1281 
1282  if (fileSz_ >= maxFileSz_)
1283  {
1284  JLOG(j_.warn()) << "maximum storage size reached";
1285  canAdd_ = false;
1286  }
1287  else if (maxFileSz_ - fileSz_ > available())
1288  {
1289  JLOG(j_.warn())
1290  << "maximum shard store size exceeds available storage space";
1291  }
1292 }
1293 
// Rebuild status_ as a range-set string of all finalized shard indexes
// (signature line 1295 missing from listing — presumably takes the held
// lock guard; caller must hold mutex_).
1294 void
1296 {
1297  if (!shards_.empty())
1298  {
// (Line 1299 missing from listing — presumably the RangeSet declaration
// that `rs` below refers to.)
1300  for (auto const& e : shards_)
1301  if (e.second.state == ShardInfo::State::final)
1302  rs.insert(e.second.shard->index());
1303  status_ = to_string(rs);
1304  }
1305  else
1306  status_.clear();
1307 }
1308 
// Return the positive/negative cache pair of the shard containing seq
// (return type and signature lines missing from listing — presumably
// getCache(seq)). Both pointers are null when no shard holds the sequence.
1311 {
1312  auto const shardIndex{seqToShardIndex(seq)};
1313  std::shared_ptr<Shard> shard;
1314  {
1315  std::lock_guard lock(mutex_);
1316  assert(init_);
1317 
1318  if (auto const it{shards_.find(shardIndex)};
1319  it != shards_.end() && it->second.shard)
1320  {
1321  shard = it->second.shard;
1322  }
1323  else
1324  return {};
1325  }
1326 
1327  std::shared_ptr<PCache> pCache;
1328  std::shared_ptr<NCache> nCache;
1329  std::tie(std::ignore, pCache, nCache) = shard->getBackendAll();
1330 
1331  return std::make_pair(pCache, nCache);
1332 }
1333 
// Free bytes available on the volume holding the shard directory (return
// type and signature lines missing from listing — presumably available()).
// Returns 0, with an error log, on any filesystem exception.
1336 {
1337  try
1338  {
1339  return boost::filesystem::space(dir_).available;
1340  }
1341  catch (std::exception const& e)
1342  {
1343  JLOG(j_.error()) << "exception " << e.what() << " in function "
1344  << __func__;
1345  return 0;
1346  }
1347 }
1348 
// Store a ledger into the given shard (function name line 1350 missing
// from listing — presumably storeLedgerInShard). On store failure the
// shard is treated as corrupt: erased from the map, its files scheduled
// for removal, and the acquire index cleared. If the store completes the
// shard's backend, finalization is queued. File statistics are refreshed
// either way; returns whether the store succeeded.
1349 bool
1351  std::shared_ptr<Shard>& shard,
1352  std::shared_ptr<Ledger const> const& ledger)
1353 {
1354  bool result{true};
1355 
1356  if (!shard->store(ledger))
1357  {
1358  // Invalid or corrupt shard, remove it
1359  {
1360  std::lock_guard lock(mutex_);
1361  shards_.erase(shard->index());
1362 
1363  if (shard->index() == acquireIndex_)
1364  acquireIndex_ = 0;
1365 
1366  updateStatus(lock);
1367  }
1368 
1369  shard->removeOnDestroy();
1370  shard.reset();
1371  result = false;
1372  }
1373  else if (shard->isBackendComplete())
1374  {
1375  std::lock_guard lock(mutex_);
1376 
1377  if (auto const it{shards_.find(shard->index())}; it != shards_.end())
1378  {
// Shard is full: stop acquiring into it and queue finalization.
1379  if (shard->index() == acquireIndex_)
1380  acquireIndex_ = 0;
1381 
1382  if (it->second.state != ShardInfo::State::finalize)
1383  finalizeShard(it->second, false, lock);
1384  }
1385  else
1386  {
1387  JLOG(j_.debug())
1388  << "shard " << shard->index() << " is no longer being acquired";
1389  }
1390  }
1391 
1392  setFileStats();
1393  return result;
1394 }
1395 
1396 //------------------------------------------------------------------------------
1397 
// Factory (return type and name lines missing from listing; the Doxygen
// footer identifies this as make_ShardStore). Returns a DatabaseShardImp
// named "ShardStore" when the shard-database config section is present,
// otherwise nullptr — the shard store is optional.
1400  Application& app,
1401  Stoppable& parent,
1402  Scheduler& scheduler,
1403  int readThreads,
1404  beast::Journal j)
1405 {
1406  // The shard store is optional. Future changes will require it.
1407  Section const& section{
// (Line 1408 missing from listing — presumably
// app.config().section(ConfigSection::shardDatabase())}; .)
1409  if (section.empty())
1410  return nullptr;
1411 
1412  return std::make_unique<DatabaseShardImp>(
1413  app, parent, "ShardStore", scheduler, readThreads, j);
1414 }
1415 
1416 } // namespace NodeStore
1417 } // namespace ripple
ripple::NodeStore::make_ShardStore
std::unique_ptr< DatabaseShard > make_ShardStore(Application &app, Stoppable &parent, Scheduler &scheduler, int readThreads, beast::Journal j)
Definition: DatabaseShardImp.cpp:1399
ripple::Section
Holds a collection of configuration values.
Definition: BasicConfig.h:43
ripple::loadLedgerHelper
std::tuple< std::shared_ptr< Ledger >, std::uint32_t, uint256 > loadLedgerHelper(std::string const &sqlSuffix, Application &app, bool acquire)
Definition: Ledger.cpp:1010
ripple::Application
Definition: Application.h:94
ripple::NodeStore::DatabaseShardImp::ledgersPerShard_
std::uint32_t ledgersPerShard_
Definition: DatabaseShardImp.h:232
ripple::hotUNKNOWN
@ hotUNKNOWN
Definition: NodeObject.h:33
ripple::NodeStore::DatabaseShardImp::lastLedgerSeq
std::uint32_t lastLedgerSeq(std::uint32_t shardIndex) const override
Calculates the last ledger sequence for a given shard index.
Definition: DatabaseShardImp.h:111
ripple::NodeStore::DatabaseShardImp::earliestShardIndex
std::uint32_t earliestShardIndex() const override
Definition: DatabaseShardImp.h:89
ripple::makeSlice
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:194
ripple::NodeStore::DatabaseShardImp::mutex_
std::mutex mutex_
Definition: DatabaseShardImp.h:196
ripple::NodeStore::DatabaseShardImp::prepareLedger
boost::optional< std::uint32_t > prepareLedger(std::uint32_t validLedgerSeq) override
Prepare to store a new ledger in the shard being acquired.
Definition: DatabaseShardImp.cpp:193
ripple::NodeStore::DatabaseShardImp::app_
Application & app_
Definition: DatabaseShardImp.h:194
ripple::NodeStore::DatabaseShardImp::storeLedger
bool storeLedger(std::shared_ptr< Ledger const > const &srcLedger) override
Copies a ledger stored in a different database to this one.
Definition: DatabaseShardImp.cpp:908
ripple::NodeStore::Database
Persistency layer for NodeObject.
Definition: Database.h:53
std::string
STL class.
ripple::NodeStore::DatabaseShardImp::fetch
std::shared_ptr< NodeObject > fetch(uint256 const &hash, std::uint32_t seq) override
Fetch an object.
Definition: DatabaseShardImp.cpp:880
std::shared_ptr
STL class.
ripple::NodeStore::Database::doFetch
std::shared_ptr< NodeObject > doFetch(uint256 const &hash, std::uint32_t seq, TaggedCache< uint256, NodeObject > &pCache, KeyCache< uint256 > &nCache, bool isAsync)
Definition: Database.cpp:184
ripple::loadByIndex
std::shared_ptr< Ledger > loadByIndex(std::uint32_t ledgerIndex, Application &app, bool acquire)
Definition: Ledger.cpp:1102
ripple::NodeStore::DatabaseShardImp::seqToShardIndex
std::uint32_t seqToShardIndex(std::uint32_t seq) const override
Calculates the shard index for a given ledger sequence.
Definition: DatabaseShardImp.h:95
std::exception
STL class.
std::stoul
T stoul(T... args)
beast::Journal::trace
Stream trace() const
Severity stream access functions.
Definition: Journal.h:309
ripple::NodeStore::DatabaseShardImp::ShardInfo::State::none
@ none
ripple::Family::reset
virtual void reset()=0
ripple::Serializer::modData
Blob & modData()
Definition: Serializer.h:212
std::pair
ripple::NodeStore::DatabaseShardImp::getCache
std::pair< std::shared_ptr< PCache >, std::shared_ptr< NCache > > getCache(std::uint32_t seq)
Definition: DatabaseShardImp.cpp:1310
ripple::NodeStore::DatabaseShardImp::removePreShard
void removePreShard(std::uint32_t shardIndex) override
Remove a previously prepared shard index for import.
Definition: DatabaseShardImp.cpp:306
ripple::LedgerMaster::getValidLedgerIndex
LedgerIndex getValidLedgerIndex()
Definition: LedgerMaster.cpp:227
ripple::NodeStore::Database::fdRequired_
int fdRequired_
Definition: Database.h:244
ripple::NodeStore::DatabaseShardImp::fileSz_
std::uint64_t fileSz_
Definition: DatabaseShardImp.h:227
std::vector
STL class.
ripple::ConfigSection::shardDatabase
static std::string shardDatabase()
Definition: ConfigSections.h:38
ripple::NodeObjectType
NodeObjectType
The types of node objects.
Definition: NodeObject.h:32
ripple::NodeObject::createObject
static std::shared_ptr< NodeObject > createObject(NodeObjectType type, Blob &&data, uint256 const &hash)
Create an object from fields.
Definition: NodeObject.cpp:37
ripple::NodeStore::DatabaseShardImp::taskQueue_
std::unique_ptr< TaskQueue > taskQueue_
Definition: DatabaseShardImp.h:203
ripple::NodeStore::DatabaseShardImp::ShardInfo::state
State state
Definition: DatabaseShardImp.h:191
ripple::NodeStore::DatabaseShardImp::setStored
void setStored(std::shared_ptr< Ledger const > const &ledger) override
Notifies the database that the given ledger has been fully acquired and stored.
Definition: DatabaseShardImp.cpp:496
ripple::NodeStore::DatabaseShardImp::ShardInfo::State::acquire
@ acquire
beast::Journal::warn
Stream warn() const
Definition: Journal.h:327
ripple::NodeStore::DatabaseShardImp::prepareShard
bool prepareShard(std::uint32_t shardIndex) override
Prepare a shard index to be imported into the database.
Definition: DatabaseShardImp.cpp:255
std::lock_guard
STL class.
ripple::kilobytes
constexpr auto kilobytes(T value) noexcept
Definition: ByteUtilities.h:27
ripple::NetworkOPs::getOperatingMode
virtual OperatingMode getOperatingMode() const =0
ripple::Serializer::add32
int add32(std::uint32_t)
Definition: Serializer.cpp:49
ripple::NodeStore::DatabaseShardImp::available
std::uint64_t available() const
Definition: DatabaseShardImp.cpp:1335
ripple::NodeStore::DatabaseShardImp::ShardInfo::State::final
@ final
ripple::NodeStore::DatabaseShardImp::asyncFetch
bool asyncFetch(uint256 const &hash, std::uint32_t seq, std::shared_ptr< NodeObject > &object) override
Fetch an object without waiting.
Definition: DatabaseShardImp.cpp:889
std::all_of
T all_of(T... args)
ripple::to_string
std::string to_string(ListDisposition disposition)
Definition: ValidatorList.cpp:41
ripple::NodeStore::Shard::finalKey
static const uint256 finalKey
Definition: Shard.h:154
ripple::Overlay::foreach
std::enable_if_t< !std::is_void< typename UnaryFunc::return_type >::value, typename UnaryFunc::return_type > foreach(UnaryFunc f)
Visit every active peer and return a value The functor must:
Definition: Overlay.h:176
std::shared_ptr::reset
T reset(T... args)
ripple::SHAMapHash
Definition: SHAMapTreeNode.h:43
ripple::Application::getOPs
virtual NetworkOPs & getOPs()=0
ripple::NodeStore::DatabaseShardImp::sweep
void sweep() override
Remove expired entries from the positive and negative caches.
Definition: DatabaseShardImp.cpp:992
std::string::clear
T clear(T... args)
ripple::NodeStore::DatabaseShardImp::ShardInfo::State::finalize
@ finalize
ripple::send_always
Sends a message to all peers.
Definition: predicates.h:31
ripple::Stoppable::setParent
void setParent(Stoppable &parent)
Set the parent of this Stoppable.
Definition: Stoppable.cpp:43
ripple::NodeStore::Shard::version
static constexpr std::uint32_t version
Definition: Shard.h:149
std::tie
T tie(T... args)
std::vector::push_back
T push_back(T... args)
ripple::NodeStore::DatabaseShardImp::getDesiredAsyncReadCount
int getDesiredAsyncReadCount(std::uint32_t seq) override
Get the maximum number of async reads the node store prefers.
Definition: DatabaseShardImp.cpp:953
ripple::NodeStore::DatabaseShardImp::firstLedgerSeq
std::uint32_t firstLedgerSeq(std::uint32_t shardIndex) const override
Calculates the first ledger sequence for a given shard index.
Definition: DatabaseShardImp.h:102
ripple::NodeStore::DatabaseShardImp::getCacheHitRate
float getCacheHitRate() override
Get the positive cache hits to total attempts ratio.
Definition: DatabaseShardImp.cpp:975
ripple::NodeStore::DatabaseShardImp::avgShardFileSz_
std::uint64_t avgShardFileSz_
Definition: DatabaseShardImp.h:238
ripple::base_uint< 256 >
ripple::NodeStore::DatabaseShardImp::status_
std::string status_
Definition: DatabaseShardImp.h:218
ripple::NodeStore::DatabaseShardImp::fetchFrom
std::shared_ptr< NodeObject > fetchFrom(uint256 const &hash, std::uint32_t seq) override
Definition: DatabaseShardImp.cpp:1091
ripple::NodeStore::DatabaseShardImp::getPreShards
std::string getPreShards() override
Get shard indexes being imported.
Definition: DatabaseShardImp.cpp:319
ripple::NodeStore::DatabaseShardImp::getWriteLoad
std::int32_t getWriteLoad() const override
Retrieve the estimated number of pending write operations.
Definition: DatabaseShardImp.cpp:823
ripple::NodeStore::DatabaseShardImp::maxFileSz_
std::uint64_t maxFileSz_
Definition: DatabaseShardImp.h:224
ripple::NodeStore::TaskQueue
Definition: TaskQueue.h:32
ripple::OperatingMode::DISCONNECTED
@ DISCONNECTED
not ready to process requests
ripple::Stoppable
Provides an interface for starting and stopping.
Definition: Stoppable.h:200
ripple::Application::shardFamily
virtual Family * shardFamily()=0
ripple::NodeStore::DatabaseShardImp::ShardInfo::shard
std::shared_ptr< Shard > shard
Definition: DatabaseShardImp.h:190
ripple::rand_int
std::enable_if_t< std::is_integral< Integral >::value &&detail::is_engine< Engine >::value, Integral > rand_int(Engine &engine, Integral min, Integral max)
Return a uniformly distributed random integer.
Definition: ripple/basics/random.h:115
ripple::NodeStore::DatabaseShardImp::initConfig
bool initConfig(std::lock_guard< std::mutex > &)
Definition: DatabaseShardImp.cpp:1015
ripple::InboundLedger::deserializeHeader
static LedgerInfo deserializeHeader(Slice data, bool hasPrefix)
Definition: InboundLedger.cpp:261
ripple::NodeStore::DatabaseShardImp::init_
bool init_
Definition: DatabaseShardImp.h:197
ripple::NodeStore::DatabaseShardImp::getCompleteShards
std::string getCompleteShards() override
Query which complete shards are stored.
Definition: DatabaseShardImp.cpp:551
ripple::Application::getLedgerMaster
virtual LedgerMaster & getLedgerMaster()=0
ripple::PublicKey
A public key.
Definition: PublicKey.h:59
ripple::Config
Definition: Config.h:66
ripple::NodeStore::DatabaseShardImp::shards_
std::map< std::uint32_t, ShardInfo > shards_
Definition: DatabaseShardImp.h:206
std::ofstream
STL class.
ripple::NodeStore::Database::fetch
virtual std::shared_ptr< NodeObject > fetch(uint256 const &hash, std::uint32_t seq)=0
Fetch an object.
ripple::NodeStore::DatabaseShardImp::~DatabaseShardImp
~DatabaseShardImp() override
Definition: DatabaseShardImp.cpp:60
ripple::Application::config
virtual Config & config()=0
ripple::NodeStore::DatabaseShardImp::dir_
boost::filesystem::path dir_
Definition: DatabaseShardImp.h:212
ripple::NodeStore::DatabaseShardImp::updateStatus
void updateStatus(std::lock_guard< std::mutex > &)
Definition: DatabaseShardImp.cpp:1295
ripple::Config::standalone
bool standalone() const
Definition: Config.h:216
ripple::NodeStore::seqToShardIndex
constexpr std::uint32_t seqToShardIndex(std::uint32_t seq, std::uint32_t ledgersPerShard=DatabaseShard::ledgersPerShardDefault)
Definition: DatabaseShard.h:190
ripple::Application::nodeIdentity
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
ripple::NodeStore::DatabaseShard
A collection of historical shards.
Definition: DatabaseShard.h:37
std::to_string
T to_string(T... args)
ripple::NodeStore::DatabaseShardImp::importShard
bool importShard(std::uint32_t shardIndex, boost::filesystem::path const &srcDir) override
Import a shard into the shard database.
Definition: DatabaseShardImp.cpp:338
ripple::NodeStore::Database::storeStats
void storeStats(size_t sz)
Definition: Database.h:250
beast::Journal::error
Stream error() const
Definition: Journal.h:333
ripple::ConfigSection
Definition: ConfigSections.h:28
beast::Journal
A generic endpoint for log messages.
Definition: Journal.h:58
std::uint32_t
ripple::NodeStore::Database::earliestLedgerSeq
std::uint32_t earliestLedgerSeq() const
Definition: Database.h:236
ripple::NodeStore::DatabaseShardImp::setFileStats
void setFileStats()
Definition: DatabaseShardImp.cpp:1248
ripple::NodeStore::DatabaseShardImp::acquireIndex_
std::uint32_t acquireIndex_
Definition: DatabaseShardImp.h:209
ripple::NodeStore::Scheduler
Scheduling for asynchronous backend activity.
Definition: ripple/nodestore/Scheduler.h:57
ripple::NodeStore::DatabaseShardImp::findAcquireIndex
boost::optional< std::uint32_t > findAcquireIndex(std::uint32_t validLedgerSeq, std::lock_guard< std::mutex > &)
Definition: DatabaseShardImp.cpp:1112
ripple::NodeStore::Database::fetchInternal
std::shared_ptr< NodeObject > fetchInternal(uint256 const &hash, std::shared_ptr< Backend > backend)
Definition: Database.cpp:123
ripple::NodeStore::DatabaseShardImp::init
bool init() override
Initialize the database.
Definition: DatabaseShardImp.cpp:66
ripple::Serializer
Definition: Serializer.h:43
ripple::NodeStore::DatabaseShardImp::store
void store(NodeObjectType type, Blob &&data, uint256 const &hash, std::uint32_t seq) override
Store the object.
Definition: DatabaseShardImp.cpp:840
ripple::NodeStore::DatabaseShardImp::parent_
Stoppable & parent_
Definition: DatabaseShardImp.h:195
ripple::NodeStore::DatabaseShardImp::importMarker_
static constexpr auto importMarker_
Definition: DatabaseShardImp.h:241
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::NodeStore::Database::storeLedger
virtual bool storeLedger(std::shared_ptr< Ledger const > const &srcLedger)=0
Copies a ledger stored in a different database to this one.
ripple::Application::getNodeStore
virtual NodeStore::Database & getNodeStore()=0
ripple::NodeStore::DatabaseShardImp::import
void import(Database &source) override
Import the application local node store.
Definition: DatabaseShardImp.cpp:605
ripple::NodeStore::Database::j_
const beast::Journal j_
Definition: Database.h:242
ripple::NodeStore::DatabaseShardImp::fetchLedger
std::shared_ptr< Ledger > fetchLedger(uint256 const &hash, std::uint32_t seq) override
Fetch a ledger from the shard store.
Definition: DatabaseShardImp.cpp:417
ripple::NodeStore::Database::stopThreads
void stopThreads()
Definition: Database.cpp:93
std
STL namespace.
ripple::NodeStore::Database::asyncFetch
virtual bool asyncFetch(uint256 const &hash, std::uint32_t seq, std::shared_ptr< NodeObject > &object)=0
Fetch an object without waiting.
ripple::LedgerMaster::getCurrentLedgerIndex
LedgerIndex getCurrentLedgerIndex()
Definition: LedgerMaster.cpp:221
ripple::Application::overlay
virtual Overlay & overlay()=0
std::vector::empty
T empty(T... args)
ripple::NodeStore::DatabaseShardImp::validate
void validate() override
Verifies shard store data is valid.
Definition: DatabaseShardImp.cpp:560
beast::Journal::debug
Stream debug() const
Definition: Journal.h:315
ripple::getHashesByIndex
bool getHashesByIndex(std::uint32_t ledgerIndex, uint256 &ledgerHash, uint256 &parentHash, Application &app)
Definition: Ledger.cpp:1164
ripple::NodeStore::DatabaseShardImp::canAdd_
bool canAdd_
Definition: DatabaseShardImp.h:215
std::make_pair
T make_pair(T... args)
ripple::NodeStore::Database::scheduler_
Scheduler & scheduler_
Definition: Database.h:243
ripple::RangeSet
boost::icl::interval_set< T, std::less, ClosedInterval< T > > RangeSet
A set of closed intervals over the domain T.
Definition: RangeSet.h:69
std::max
T max(T... args)
ripple::NodeStore::DatabaseShardImp::DatabaseShardImp
DatabaseShardImp()=delete
std::unique_ptr
STL class.
ripple::NodeStore::DatabaseShardImp::ShardInfo::State::import
@ import
ripple::NodeStore::asyncDivider
@ asyncDivider
Definition: nodestore/impl/Tuning.h:32
ripple::ConfigSection::nodeDatabase
static std::string nodeDatabase()
Definition: ConfigSections.h:33
ripple::NodeStore::DatabaseShardImp::ShardInfo
Definition: DatabaseShardImp.h:174
std::exception::what
T what(T... args)
ripple::NodeStore::DatabaseShardImp::finalizeShard
void finalizeShard(ShardInfo &shardInfo, bool writeSQLite, std::lock_guard< std::mutex > &)
Definition: DatabaseShardImp.cpp:1172
ripple::NodeStore::DatabaseShardImp::onStop
void onStop() override
Override called when the stop notification is issued.
Definition: DatabaseShardImp.cpp:588
ripple::NodeStore::DatabaseShardImp::ctx_
std::unique_ptr< nudb::context > ctx_
Definition: DatabaseShardImp.h:200
ripple::BasicConfig::section
Section & section(std::string const &name)
Returns the section with the given name.
Definition: BasicConfig.cpp:138
ripple::Serializer::add256
int add256(uint256 const &)
Definition: Serializer.cpp:119
ripple::Stoppable::isStopping
bool isStopping() const
Returns true if the stoppable should stop.
Definition: Stoppable.cpp:54
ripple::NodeStore::DatabaseShardImp::storeLedgerInShard
bool storeLedgerInShard(std::shared_ptr< Shard > &shard, std::shared_ptr< Ledger const > const &ledger)
Definition: DatabaseShardImp.cpp:1350
ripple::NodeStore::DatabaseShardImp::backendName_
std::string backendName_
Definition: DatabaseShardImp.h:221