rippled
Pg.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2020 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #ifdef RIPPLED_REPORTING
21 // Need raw socket manipulation to determine if the postgres socket is IPv4 or IPv6.
22 #if defined(_WIN32)
23 #include <winsock2.h>
24 #include <ws2tcpip.h>
25 #else
26 #include <arpa/inet.h>
27 #include <netinet/in.h>
28 #include <sys/socket.h>
29 #include <sys/types.h>
30 #endif
31 
32 #include <ripple/basics/contract.h>
33 #include <ripple/core/Pg.h>
34 #include <boost/asio/ssl/detail/openssl_init.hpp>
35 #include <boost/format.hpp>
36 #include <algorithm>
37 #include <array>
38 #include <cassert>
39 #include <cstdlib>
40 #include <cstring>
41 #include <exception>
42 #include <functional>
43 #include <iterator>
44 #include <sstream>
45 #include <stdexcept>
46 #include <string>
47 #include <string_view>
48 #include <thread>
49 #include <utility>
50 #include <vector>
51 
52 namespace ripple {
53 
54 static void
55 noticeReceiver(void* arg, PGresult const* res)
56 {
57  beast::Journal& j = *static_cast<beast::Journal*>(arg);
58  JLOG(j.info()) << "server message: " << PQresultErrorMessage(res);
59 }
60 
61 //-----------------------------------------------------------------------------
62 
63 std::string
64 PgResult::msg() const
65 {
66  if (error_.has_value())
67  {
68  std::stringstream ss;
69  ss << error_->first << ": " << error_->second;
70  return ss.str();
71  }
72  if (result_)
73  return "ok";
74 
75  // Must be stopping.
76  return "stopping";
77 }
78 
79 //-----------------------------------------------------------------------------
80 
81 /*
82  Connecting described in:
83  https://www.postgresql.org/docs/10/libpq-connect.html
84  */
85 void
86 Pg::connect()
87 {
88  if (conn_)
89  {
90  // Nothing to do if we already have a good connection.
91  if (PQstatus(conn_.get()) == CONNECTION_OK)
92  return;
93  /* Try resetting connection. */
94  PQreset(conn_.get());
95  }
96  else // Make new connection.
97  {
98  conn_.reset(PQconnectdbParams(
99  reinterpret_cast<char const* const*>(&config_.keywordsIdx[0]),
100  reinterpret_cast<char const* const*>(&config_.valuesIdx[0]),
101  0));
102  if (!conn_)
103  Throw<std::runtime_error>("No db connection struct");
104  }
105 
108  if (PQstatus(conn_.get()) == CONNECTION_BAD)
109  {
110  std::stringstream ss;
111  ss << "DB connection status " << PQstatus(conn_.get()) << ": "
112  << PQerrorMessage(conn_.get());
113  Throw<std::runtime_error>(ss.str());
114  }
115 
116  // Log server session console messages.
117  PQsetNoticeReceiver(
118  conn_.get(), noticeReceiver, const_cast<beast::Journal*>(&j_));
119 }
120 
121 PgResult
122 Pg::query(char const* command, std::size_t nParams, char const* const* values)
123 {
124  // The result object must be freed using the libpq API PQclear() call.
125  pg_result_type ret{nullptr, [](PGresult* result) { PQclear(result); }};
126  // Connect then submit query.
127  while (true)
128  {
129  {
130  std::lock_guard<std::mutex> lock(mutex_);
131  if (stop_)
132  return PgResult();
133  }
134  try
135  {
136  connect();
137  if (nParams)
138  {
139  // PQexecParams can process only a single command.
140  ret.reset(PQexecParams(
141  conn_.get(),
142  command,
143  nParams,
144  nullptr,
145  values,
146  nullptr,
147  nullptr,
148  0));
149  }
150  else
151  {
152  // PQexec can process multiple commands separated by
153  // semi-colons. Returns the response from the last
154  // command processed.
155  ret.reset(PQexec(conn_.get(), command));
156  }
157  if (!ret)
158  Throw<std::runtime_error>("no result structure returned");
159  break;
160  }
161  catch (std::exception const& e)
162  {
163  // Sever connection and retry until successful.
164  disconnect();
165  JLOG(j_.error()) << "database error, retrying: " << e.what();
167  }
168  }
169 
170  // Ensure proper query execution.
171  switch (PQresultStatus(ret.get()))
172  {
173  case PGRES_TUPLES_OK:
174  case PGRES_COMMAND_OK:
175  case PGRES_COPY_IN:
176  case PGRES_COPY_OUT:
177  case PGRES_COPY_BOTH:
178  break;
179  default: {
180  std::stringstream ss;
181  ss << "bad query result: " << PQresStatus(PQresultStatus(ret.get()))
182  << " error message: " << PQerrorMessage(conn_.get())
183  << ", number of tuples: " << PQntuples(ret.get())
184  << ", number of fields: " << PQnfields(ret.get());
185  JLOG(j_.error()) << ss.str();
186  PgResult retRes(ret.get(), conn_.get());
187  disconnect();
188  return retRes;
189  }
190  }
191 
192  return PgResult(std::move(ret));
193 }
194 
195 static pg_formatted_params
196 formatParams(pg_params const& dbParams, beast::Journal const& j)
197 {
198  std::vector<std::optional<std::string>> const& values = dbParams.second;
199  /* Convert vector to C-style array of C-strings for postgres API.
200  std::nullopt is a proxy for NULL since an empty std::string is
201  0 length but not NULL. */
202  std::vector<char const*> valuesIdx;
203  valuesIdx.reserve(values.size());
204  std::stringstream ss;
205  bool first = true;
206  for (auto const& value : values)
207  {
208  if (value)
209  {
210  valuesIdx.push_back(value->c_str());
211  ss << value->c_str();
212  }
213  else
214  {
215  valuesIdx.push_back(nullptr);
216  ss << "(null)";
217  }
218  if (first)
219  first = false;
220  else
221  ss << ',';
222  }
223 
224  JLOG(j.trace()) << "query: " << dbParams.first << ". params: " << ss.str();
225  return valuesIdx;
226 }
227 
228 PgResult
229 Pg::query(pg_params const& dbParams)
230 {
231  char const* const& command = dbParams.first;
232  auto const formattedParams = formatParams(dbParams, j_);
233  return query(
234  command,
235  formattedParams.size(),
236  formattedParams.size()
237  ? reinterpret_cast<char const* const*>(&formattedParams[0])
238  : nullptr);
239 }
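
// Illustrative call of the overload above (hypothetical query text and value;
// `pg` stands for a connection checked out from the pool). This assumes
// pg_params pairs the command text with a vector of std::optional<std::string>
// positional parameters, where std::nullopt binds SQL NULL:
//
//   pg_params params{
//       "SELECT ledger_hash FROM ledgers WHERE ledger_seq = $1",
//       {std::make_optional<std::string>("12345")}};
//   auto res = pg->query(params);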
240 
241 void
242 Pg::bulkInsert(char const* table, std::string const& records)
243 {
244  // https://www.postgresql.org/docs/12/libpq-copy.html#LIBPQ-COPY-SEND
245  assert(conn_.get());
246  static auto copyCmd = boost::format(R"(COPY %s FROM stdin)");
247  auto res = query(boost::str(copyCmd % table).c_str());
248  if (!res || res.status() != PGRES_COPY_IN)
249  {
250  std::stringstream ss;
251  ss << "bulkInsert to " << table
252  << ". Postgres insert error: " << res.msg();
253  if (res)
254  ss << ". Query status not PGRES_COPY_IN: " << res.status();
255  Throw<std::runtime_error>(ss.str());
256  }
257 
258  if (PQputCopyData(conn_.get(), records.c_str(), records.size()) == -1)
259  {
260  std::stringstream ss;
261  ss << "bulkInsert to " << table
262  << ". PQputCopyData error: " << PQerrorMessage(conn_.get());
263  disconnect();
264  Throw<std::runtime_error>(ss.str());
265  }
266 
267  if (PQputCopyEnd(conn_.get(), nullptr) == -1)
268  {
269  std::stringstream ss;
270  ss << "bulkInsert to " << table
271  << ". PQputCopyEnd error: " << PQerrorMessage(conn_.get());
272  disconnect();
273  Throw<std::runtime_error>(ss.str());
274  }
275 
276  // The result object must be freed using the libpq API PQclear() call.
277  pg_result_type copyEndResult{
278  nullptr, [](PGresult* result) { PQclear(result); }};
279  copyEndResult.reset(PQgetResult(conn_.get()));
280  ExecStatusType status = PQresultStatus(copyEndResult.get());
281  if (status != PGRES_COMMAND_OK)
282  {
283  std::stringstream ss;
284  ss << "bulkInsert to " << table
285  << ". PQputCopyEnd status not PGRES_COMMAND_OK: " << status;
286  disconnect();
287  Throw<std::runtime_error>(ss.str());
288  }
289 }
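
// Illustrative sketch of the `records` text this function expects, using
// PostgreSQL's default COPY text format (column values are placeholders):
// one row per line, columns separated by tabs, and \N for NULL. For the
// transactions table a row would look schematically like:
//
//   ledger_seq<TAB>transaction_index<TAB>trans_id<TAB>nodestore_hash<NEWLINE>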
290 
291 bool
292 Pg::clear()
293 {
294  if (!conn_)
295  return false;
296 
297  // The result object must be freed using the libpq API PQclear() call.
298  pg_result_type res{nullptr, [](PGresult* result) { PQclear(result); }};
299 
300  // Consume results until no more, or until the connection is severed.
301  do
302  {
303  res.reset(PQgetResult(conn_.get()));
304  if (!res)
305  break;
306 
307  // Pending bulk copy operations may leave the connection in such a
308  // state that it must be disconnected.
309  switch (PQresultStatus(res.get()))
310  {
311  case PGRES_COPY_IN:
312  if (PQputCopyEnd(conn_.get(), nullptr) != -1)
313  break;
314  [[fallthrough]]; // avoids compiler warning
315  case PGRES_COPY_OUT:
316  case PGRES_COPY_BOTH:
317  conn_.reset();
318  default:;
319  }
320  } while (res && conn_);
321 
322  return conn_ != nullptr;
323 }
324 
325 //-----------------------------------------------------------------------------
326 
327 PgPool::PgPool(Section const& pgConfig, Stoppable& parent, beast::Journal j)
328  : Stoppable("PgPool", parent), j_(j)
329 {
330  // Make sure that boost::asio initializes the SSL library.
331  {
332  static boost::asio::ssl::detail::openssl_init<true> initSsl;
333  }
334  // Don't have postgres client initialize SSL.
335  PQinitOpenSSL(0, 0);
336 
337  /*
338  Connect to postgres to create low level connection parameters
339  with optional caching of network address info for subsequent connections.
340  See https://www.postgresql.org/docs/10/libpq-connect.html
341 
342  For bounds checking of postgres connection data received from
343  the network: the largest size for any connection field in
344  PG source code is 64 bytes as of 5/2019. There are 29 fields.
345  */
346  constexpr std::size_t maxFieldSize = 1024;
347  constexpr std::size_t maxFields = 1000;
348 
349  // The connection object must be freed using the libpq API PQfinish() call.
350  pg_connection_type conn(
351  PQconnectdb(get<std::string>(pgConfig, "conninfo").c_str()),
352  [](PGconn* conn) { PQfinish(conn); });
353  if (!conn)
354  Throw<std::runtime_error>("Can't create DB connection.");
355  if (PQstatus(conn.get()) != CONNECTION_OK)
356  {
357  std::stringstream ss;
358  ss << "Initial DB connection failed: " << PQerrorMessage(conn.get());
359  Throw<std::runtime_error>(ss.str());
360  }
361 
362  int const sockfd = PQsocket(conn.get());
363  if (sockfd == -1)
364  Throw<std::runtime_error>("No DB socket is open.");
365  struct sockaddr_storage addr;
366  socklen_t len = sizeof(addr);
367  if (getpeername(sockfd, reinterpret_cast<struct sockaddr*>(&addr), &len) ==
368  -1)
369  {
370  Throw<std::system_error>(
371  errno, std::generic_category(), "Can't get server address info.");
372  }
373 
374  // Set "port" and "hostaddr" if we're caching it.
375  bool const remember_ip = get(pgConfig, "remember_ip", true);
376 
377  if (remember_ip)
378  {
379  config_.keywords.push_back("port");
380  config_.keywords.push_back("hostaddr");
381  std::string port;
382  std::string hostaddr;
383 
384  if (addr.ss_family == AF_INET)
385  {
386  hostaddr.assign(INET_ADDRSTRLEN, '\0');
387  struct sockaddr_in const& ainfo =
388  reinterpret_cast<struct sockaddr_in&>(addr);
389  port = std::to_string(ntohs(ainfo.sin_port));
390  if (!inet_ntop(
391  AF_INET, &ainfo.sin_addr, &hostaddr[0], hostaddr.size()))
392  {
393  Throw<std::system_error>(
394  errno,
395  std::generic_category(),
396  "Can't get IPv4 address string.");
397  }
398  }
399  else if (addr.ss_family == AF_INET6)
400  {
401  hostaddr.assign(INET6_ADDRSTRLEN, '\0');
402  struct sockaddr_in6 const& ainfo =
403  reinterpret_cast<struct sockaddr_in6&>(addr);
404  port = std::to_string(ntohs(ainfo.sin6_port));
405  if (!inet_ntop(
406  AF_INET6, &ainfo.sin6_addr, &hostaddr[0], hostaddr.size()))
407  {
408  Throw<std::system_error>(
409  errno,
410  std::generic_category(),
411  "Can't get IPv6 address string.");
412  }
413  }
414 
415  config_.values.push_back(port.c_str());
416  config_.values.push_back(hostaddr.c_str());
417  }
418  std::unique_ptr<PQconninfoOption, void (*)(PQconninfoOption*)> connOptions(
419  PQconninfo(conn.get()),
420  [](PQconninfoOption* opts) { PQconninfoFree(opts); });
421  if (!connOptions)
422  Throw<std::runtime_error>("Can't get DB connection options.");
423 
424  std::size_t nfields = 0;
425  for (PQconninfoOption* option = connOptions.get();
426  option->keyword != nullptr;
427  ++option)
428  {
429  if (++nfields > maxFields)
430  {
431  std::stringstream ss;
432  ss << "DB returned connection options with > " << maxFields
433  << " fields.";
434  Throw<std::runtime_error>(ss.str());
435  }
436 
437  if (!option->val ||
438  (remember_ip &&
439  (!strcmp(option->keyword, "hostaddr") ||
440  !strcmp(option->keyword, "port"))))
441  {
442  continue;
443  }
444 
445  if (strlen(option->keyword) > maxFieldSize ||
446  strlen(option->val) > maxFieldSize)
447  {
448  std::stringstream ss;
449  ss << "DB returned a connection option name or value with\n";
450  ss << "excessive size (>" << maxFieldSize << " bytes).\n";
451  ss << "option (possibly truncated): "
452  << std::string_view(
453  option->keyword,
454  std::min(strlen(option->keyword), maxFieldSize))
455  << '\n';
456  ss << " value (possibly truncated): "
457  << std::string_view(
458  option->val, std::min(strlen(option->val), maxFieldSize));
459  Throw<std::runtime_error>(ss.str());
460  }
461  config_.keywords.push_back(option->keyword);
462  config_.values.push_back(option->val);
463  }
464 
465  config_.keywordsIdx.reserve(config_.keywords.size() + 1);
466  config_.valuesIdx.reserve(config_.values.size() + 1);
467  for (std::size_t n = 0; n < config_.keywords.size(); ++n)
468  {
469  config_.keywordsIdx.push_back(config_.keywords[n].c_str());
470  config_.valuesIdx.push_back(config_.values[n].c_str());
471  }
472  config_.keywordsIdx.push_back(nullptr);
473  config_.valuesIdx.push_back(nullptr);
474 
475  get_if_exists(pgConfig, "max_connections", config_.max_connections);
476  std::size_t timeout;
477  if (get_if_exists(pgConfig, "timeout", timeout))
478  config_.timeout = std::chrono::seconds(timeout);
479 }
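
// Illustrative config stanza consumed by this constructor (section name and
// values are placeholders; the keys match those read above):
//
//   conninfo = postgres://user:password@localhost:5432/rippled
//   remember_ip = 1
//   max_connections = 8
//   timeout = 600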
480 
481 void
482 PgPool::setup()
483 {
484  {
485  std::stringstream ss;
486  ss << "max_connections: " << config_.max_connections << ", "
487  << "timeout: " << config_.timeout.count() << ", "
488  << "connection params: ";
489  bool first = true;
490  for (std::size_t i = 0; i < config_.keywords.size(); ++i)
491  {
492  if (first)
493  first = false;
494  else
495  ss << ", ";
496  ss << config_.keywords[i] << ": "
497  << (config_.keywords[i] == "password" ? "*" : config_.values[i]);
498  }
499  JLOG(j_.debug()) << ss.str();
500  }
501 }
502 
503 void
504 PgPool::onStop()
505 {
506  std::lock_guard<std::mutex> lock(mutex_);
507  stop_ = true;
508  cond_.notify_all();
509  idle_.clear();
510  stopped();
511  JLOG(j_.info()) << "stopped";
512 }
513 
514 void
515 PgPool::idleSweeper()
516 {
517  std::size_t before, after;
518  {
519  std::lock_guard<std::mutex> lock(mutex_);
520  before = idle_.size();
521  if (config_.timeout != std::chrono::seconds(0))
522  {
523  auto const found =
524  idle_.upper_bound(clock_type::now() - config_.timeout);
525  for (auto it = idle_.begin(); it != found;)
526  {
527  it = idle_.erase(it);
528  --connections_;
529  }
530  }
531  after = idle_.size();
532  }
533 
534  JLOG(j_.info()) << "Idle sweeper. connections: " << connections_
535  << ". checked out: " << connections_ - after
536  << ". idle before, after sweep: " << before << ", "
537  << after;
538 }
539 
540 std::unique_ptr<Pg>
541 PgPool::checkout()
542 {
543  std::unique_ptr<Pg> ret;
544  std::unique_lock<std::mutex> lock(mutex_);
545  do
546  {
547  if (stop_)
548  return {};
549 
550  // If there is a connection in the pool, return the most recent.
551  if (idle_.size())
552  {
553  auto entry = idle_.rbegin();
554  ret = std::move(entry->second);
555  idle_.erase(std::next(entry).base());
556  }
557  // Otherwise, return a new connection unless over threshold.
558  else if (connections_ < config_.max_connections)
559  {
560  ++connections_;
561  ret = std::make_unique<Pg>(config_, j_, stop_, mutex_);
562  }
563  // Otherwise, wait until a connection becomes available or we stop.
564  else
565  {
566  JLOG(j_.error()) << "No database connections available.";
567  cond_.wait(lock);
568  }
569  } while (!ret && !stop_);
570  lock.unlock();
571 
572  return ret;
573 }
574 
575 void
576 PgPool::checkin(std::unique_ptr<Pg>& pg)
577 {
578  if (pg)
579  {
580  std::lock_guard<std::mutex> lock(mutex_);
581  if (!stop_ && pg->clear())
582  {
583  idle_.emplace(clock_type::now(), std::move(pg));
584  }
585  else
586  {
587  --connections_;
588  pg.reset();
589  }
590  }
591 
592  cond_.notify_all();
593 }
594 
595 //-----------------------------------------------------------------------------
596 
597 std::shared_ptr<PgPool>
598 make_PgPool(Section const& pgConfig, Stoppable& parent, beast::Journal j)
599 {
600  auto ret = std::make_shared<PgPool>(pgConfig, parent, j);
601  ret->setup();
602  return ret;
603 }
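
// Illustrative usage (hypothetical pool, config section, and journal objects),
// following the PgQuery call pattern used in applySchema() and initSchema()
// below:
//
//   auto pool = make_PgPool(pgConfig, parent, journal);
//   auto res = PgQuery(pool)({"SELECT count(*) FROM ledgers", {}});
//   if (!res)
//       JLOG(journal.error()) << "query failed: " << res.msg();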
604 
605 //-----------------------------------------------------------------------------
606 
687 #define LATEST_SCHEMA_VERSION 1
688 
689 char const* version_query = R"(
690 CREATE TABLE IF NOT EXISTS version (version int NOT NULL,
691  fresh_pending int NOT NULL);
692 
693 -- Version 0 means that no schema has been fully deployed.
694 DO $$
695 BEGIN
696  IF NOT EXISTS (SELECT 1 FROM version) THEN
697  INSERT INTO version VALUES (0, 0);
698 END IF;
699 END $$;
700 
701 -- Function to set the schema version. _in_pending should only be set to
702 -- non-zero prior to an attempt to initialize the schema from scratch.
703 -- After successful initialization, this should be set to 0.
704 -- _in_version should be set to the version of schema that has been applied
705 -- once successful application has occurred.
706 CREATE OR REPLACE FUNCTION set_schema_version (
707  _in_version int,
708  _in_pending int
709 ) RETURNS void AS $$
710 DECLARE
711  _current_version int;
712 BEGIN
713  IF _in_version IS NULL OR _in_pending IS NULL THEN RETURN; END IF;
714  IF EXISTS (SELECT 1 FROM version) THEN DELETE FROM version; END IF;
715  INSERT INTO version VALUES (_in_version, _in_pending);
716  RETURN;
717 END;
718 $$ LANGUAGE plpgsql;
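
-- Example call (illustrative; left as a comment so it does not run with the
-- schema): mark schema version 1 as fully deployed.
--   SELECT set_schema_version(1, 0);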
719 
720 -- PQexec() returns the output of the last statement in its response.
721 SELECT * FROM version;
722 )";
723 
724 std::array<char const*, LATEST_SCHEMA_VERSION + 1> full_schemata = {
725  // version 0:
726  "There is no such thing as schema version 0."
727 
728  // version 1:
729  ,
730  R"(
731 -- Table to store ledger headers.
732 CREATE TABLE IF NOT EXISTS ledgers (
733  ledger_seq bigint PRIMARY KEY,
734  ledger_hash bytea NOT NULL,
735  prev_hash bytea NOT NULL,
736  total_coins bigint NOT NULL,
737  closing_time bigint NOT NULL,
738  prev_closing_time bigint NOT NULL,
739  close_time_res bigint NOT NULL,
740  close_flags bigint NOT NULL,
741  account_set_hash bytea NOT NULL,
742  trans_set_hash bytea NOT NULL
743 );
744 
745 -- Index for lookups by ledger hash.
746 CREATE INDEX IF NOT EXISTS ledgers_ledger_hash_idx ON ledgers
747  USING hash (ledger_hash);
748 
749 -- Transactions table. Deletes from the ledger table
750 -- cascade here based on ledger_seq.
751 CREATE TABLE IF NOT EXISTS transactions (
752  ledger_seq bigint NOT NULL,
753  transaction_index bigint NOT NULL,
754  trans_id bytea NOT NULL,
755  nodestore_hash bytea NOT NULL,
756  constraint transactions_pkey PRIMARY KEY (ledger_seq, transaction_index),
757  constraint transactions_fkey FOREIGN KEY (ledger_seq)
758  REFERENCES ledgers (ledger_seq) ON DELETE CASCADE
759 );
760 
761 -- Index for lookups by transaction hash.
762 CREATE INDEX IF NOT EXISTS transactions_trans_id_idx ON transactions
763  USING hash (trans_id);
764 
765 -- Table that maps accounts to transactions affecting them. Deletes from the
766 -- ledger table by way of transactions table cascade here based on ledger_seq.
767 CREATE TABLE IF NOT EXISTS account_transactions (
768  account bytea NOT NULL,
769  ledger_seq bigint NOT NULL,
770  transaction_index bigint NOT NULL,
771  constraint account_transactions_pkey PRIMARY KEY (account, ledger_seq,
772  transaction_index),
773  constraint account_transactions_fkey FOREIGN KEY (ledger_seq,
774  transaction_index) REFERENCES transactions (
775  ledger_seq, transaction_index) ON DELETE CASCADE
776 );
777 
778 -- Index to allow for fast cascading deletions and referential integrity.
779 CREATE INDEX IF NOT EXISTS fki_account_transactions_idx ON
780  account_transactions USING btree (ledger_seq, transaction_index);
781 
782 -- Avoid inadvertent administrative tampering with committed data.
783 CREATE OR REPLACE RULE ledgers_update_protect AS ON UPDATE TO
784  ledgers DO INSTEAD NOTHING;
785 CREATE OR REPLACE RULE transactions_update_protect AS ON UPDATE TO
786  transactions DO INSTEAD NOTHING;
787 CREATE OR REPLACE RULE account_transactions_update_protect AS ON UPDATE TO
788  account_transactions DO INSTEAD NOTHING;
789 
790 -- Stored procedure to assist with the tx() RPC call. Takes transaction hash
791 -- as input. If found, returns the ledger sequence in which it was applied.
792 -- If not, returns the range of ledgers searched.
793 CREATE OR REPLACE FUNCTION tx (
794  _in_trans_id bytea
795 ) RETURNS jsonb AS $$
796 DECLARE
797  _min_ledger bigint := min_ledger();
798  _min_seq bigint := (SELECT ledger_seq
799  FROM ledgers
800  WHERE ledger_seq = _min_ledger
801  FOR SHARE);
802  _max_seq bigint := max_ledger();
803  _ledger_seq bigint;
804  _nodestore_hash bytea;
805 BEGIN
806 
807  IF _min_seq IS NULL THEN
808  RETURN jsonb_build_object('error', 'empty database');
809  END IF;
810  IF length(_in_trans_id) != 32 THEN
811  RETURN jsonb_build_object('error', '_in_trans_id size: '
812  || to_char(length(_in_trans_id), '999'));
813  END IF;
814 
815  EXECUTE 'SELECT nodestore_hash, ledger_seq
816  FROM transactions
817  WHERE trans_id = $1
818  AND ledger_seq BETWEEN $2 AND $3
819  ' INTO _nodestore_hash, _ledger_seq USING _in_trans_id, _min_seq, _max_seq;
820  IF _nodestore_hash IS NULL THEN
821  RETURN jsonb_build_object('min_seq', _min_seq, 'max_seq', _max_seq);
822  END IF;
823  RETURN jsonb_build_object('nodestore_hash', _nodestore_hash, 'ledger_seq',
824  _ledger_seq);
825 END;
826 $$ LANGUAGE plpgsql;
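
-- Example call (illustrative hash placeholder):
--   SELECT tx('\x<64 hex characters of the transaction hash>'::bytea);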
827 
828 -- Return the earliest ledger sequence intended for range operations
829 -- that protect the bottom of the range from deletion. Return NULL if empty.
830 CREATE OR REPLACE FUNCTION min_ledger () RETURNS bigint AS $$
831 DECLARE
832  _min_seq bigint := (SELECT ledger_seq from min_seq);
833 BEGIN
834  IF _min_seq IS NULL THEN
835  RETURN (SELECT ledger_seq FROM ledgers ORDER BY ledger_seq ASC LIMIT 1);
836  ELSE
837  RETURN _min_seq;
838  END IF;
839 END;
840 $$ LANGUAGE plpgsql;
841 
842 -- Return the latest ledger sequence in the database, or NULL if empty.
843 CREATE OR REPLACE FUNCTION max_ledger () RETURNS bigint AS $$
844 BEGIN
845  RETURN (SELECT ledger_seq FROM ledgers ORDER BY ledger_seq DESC LIMIT 1);
846 END;
847 $$ LANGUAGE plpgsql;
848 
849 -- account_tx() RPC helper. From the rippled reporting process, only the
850 -- parameters without defaults are required. For the parameters with
851 -- defaults, validation should be done by rippled, such as:
852 -- _in_account_id should be a valid xrp base58 address.
853 -- _in_forward either true or false according to the published api
854 -- _in_limit should be validated and not simply passed through from
855 -- client.
856 --
857 -- For _in_ledger_index_min and _in_ledger_index_max, if passed in the
858 -- request, verify that their type is int and pass through as is.
859 -- For _ledger_hash, verify and convert from hex length 32 bytes and
860 -- prepend with \x (\\x C++).
861 --
862 -- For _in_ledger_index, if the input type is integer, then pass through
863 -- as is. If the type is string and contents = validated, then do not
864 -- set _in_ledger_index. Instead set _in_validated to TRUE.
865 --
866 -- There is no need for rippled to do any type of lookup on max/min
867 -- ledger range, lookup of hash, or the like. This function does those
868 -- things, including error responses for bad input. Only the above must
869 -- be done to set the correct search range.
870 --
871 -- If a marker is present in the request, verify the members 'ledger'
872 -- and 'seq' are integers and they correspond to _in_marker_seq
873 -- _in_marker_index.
874 -- To reiterate:
875 -- JSON input field 'ledger' corresponds to _in_marker_seq
876 -- JSON input field 'seq' corresponds to _in_marker_index
877 CREATE OR REPLACE FUNCTION account_tx (
878  _in_account_id bytea,
879  _in_forward bool,
880  _in_limit bigint,
881  _in_ledger_index_min bigint = NULL,
882  _in_ledger_index_max bigint = NULL,
883  _in_ledger_hash bytea = NULL,
884  _in_ledger_index bigint = NULL,
885  _in_validated bool = NULL,
886  _in_marker_seq bigint = NULL,
887  _in_marker_index bigint = NULL
888 ) RETURNS jsonb AS $$
889 DECLARE
890  _min bigint;
891  _max bigint;
892  _sort_order text := (SELECT CASE WHEN _in_forward IS TRUE THEN
893  'ASC' ELSE 'DESC' END);
894  _marker bool;
895  _between_min bigint;
896  _between_max bigint;
897  _sql text;
898  _cursor refcursor;
899  _result jsonb;
900  _record record;
901  _tally bigint := 0;
902  _ret_marker jsonb;
903  _transactions jsonb[] := '{}';
904 BEGIN
905  IF _in_ledger_index_min IS NOT NULL OR
906  _in_ledger_index_max IS NOT NULL THEN
907  _min := (SELECT CASE WHEN _in_ledger_index_min IS NULL
908  THEN min_ledger() ELSE greatest(
909  _in_ledger_index_min, min_ledger()) END);
910  _max := (SELECT CASE WHEN _in_ledger_index_max IS NULL OR
911  _in_ledger_index_max = -1 THEN max_ledger() ELSE
912  least(_in_ledger_index_max, max_ledger()) END);
913 
914  IF _max < _min THEN
915  RETURN jsonb_build_object('error', 'max is less than min ledger');
916  END IF;
917 
918  ELSIF _in_ledger_hash IS NOT NULL OR _in_ledger_index IS NOT NULL
919  OR _in_validated IS TRUE THEN
920  IF _in_ledger_hash IS NOT NULL THEN
921  IF length(_in_ledger_hash) != 32 THEN
922  RETURN jsonb_build_object('error', '_in_ledger_hash size: '
923  || to_char(length(_in_ledger_hash), '999'));
924  END IF;
925  EXECUTE 'SELECT ledger_seq
926  FROM ledgers
927  WHERE ledger_hash = $1'
928  INTO _min USING _in_ledger_hash::bytea;
929  ELSE
930  IF _in_ledger_index IS NOT NULL AND _in_validated IS TRUE THEN
931  RETURN jsonb_build_object('error',
932  '_in_ledger_index cannot be set and _in_validated true');
933  END IF;
934  IF _in_validated IS TRUE THEN
935  _in_ledger_index := max_ledger();
936  END IF;
937  _min := (SELECT ledger_seq
938  FROM ledgers
939  WHERE ledger_seq = _in_ledger_index);
940  END IF;
941  IF _min IS NULL THEN
942  RETURN jsonb_build_object('error', 'ledger not found');
943  END IF;
944  _max := _min;
945  ELSE
946  _min := min_ledger();
947  _max := max_ledger();
948  END IF;
949 
950  IF _in_marker_seq IS NOT NULL OR _in_marker_index IS NOT NULL THEN
951  _marker := TRUE;
952  IF _in_marker_seq IS NULL OR _in_marker_index IS NULL THEN
953  -- The rippled implementation returns no transaction results
954 -- if either of these values is missing.
955  _between_min := 0;
956  _between_max := 0;
957  ELSE
958  IF _in_forward IS TRUE THEN
959  _between_min := _in_marker_seq;
960  _between_max := _max;
961  ELSE
962  _between_min := _min;
963  _between_max := _in_marker_seq;
964  END IF;
965  END IF;
966  ELSE
967  _marker := FALSE;
968  _between_min := _min;
969  _between_max := _max;
970  END IF;
971  IF _between_max < _between_min THEN
972  RETURN jsonb_build_object('error', 'ledger search range is '
973  || to_char(_between_min, '999') || '-'
974  || to_char(_between_max, '999'));
975  END IF;
976 
977  _sql := format('
978  SELECT transactions.ledger_seq, transactions.transaction_index,
979  transactions.trans_id, transactions.nodestore_hash
980  FROM transactions
981  INNER JOIN account_transactions
982  ON transactions.ledger_seq =
983  account_transactions.ledger_seq
984  AND transactions.transaction_index =
985  account_transactions.transaction_index
986  WHERE account_transactions.account = $1
987  AND account_transactions.ledger_seq BETWEEN $2 AND $3
988  ORDER BY transactions.ledger_seq %s, transactions.transaction_index %s
989  ', _sort_order, _sort_order);
990 
991  OPEN _cursor FOR EXECUTE _sql USING _in_account_id, _between_min,
992  _between_max;
993  LOOP
994  FETCH _cursor INTO _record;
995  IF _record IS NULL THEN EXIT; END IF;
996  IF _marker IS TRUE THEN
997  IF _in_marker_seq = _record.ledger_seq THEN
998  IF _in_forward IS TRUE THEN
999  IF _in_marker_index > _record.transaction_index THEN
1000  CONTINUE;
1001  END IF;
1002  ELSE
1003  IF _in_marker_index < _record.transaction_index THEN
1004  CONTINUE;
1005  END IF;
1006  END IF;
1007  END IF;
1008  _marker := FALSE;
1009  END IF;
1010 
1011  _tally := _tally + 1;
1012  IF _tally > _in_limit THEN
1013  _ret_marker := jsonb_build_object(
1014  'ledger', _record.ledger_seq,
1015  'seq', _record.transaction_index);
1016  EXIT;
1017  END IF;
1018 
1019  -- Is the transaction index in the tx object?
1020  _transactions := _transactions || jsonb_build_object(
1021  'ledger_seq', _record.ledger_seq,
1022  'transaction_index', _record.transaction_index,
1023  'trans_id', _record.trans_id,
1024  'nodestore_hash', _record.nodestore_hash);
1025 
1026  END LOOP;
1027  CLOSE _cursor;
1028 
1029  _result := jsonb_build_object('ledger_index_min', _min,
1030  'ledger_index_max', _max,
1031  'transactions', _transactions);
1032  IF _ret_marker IS NOT NULL THEN
1033  _result := _result || jsonb_build_object('marker', _ret_marker);
1034  END IF;
1035  RETURN _result;
1036 END;
1037 $$ LANGUAGE plpgsql;
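
-- Example call (illustrative account and limit): newest-first page of up to
-- 10 transactions. To fetch the next page, pass the returned marker's
-- 'ledger' and 'seq' values back as _in_marker_seq and _in_marker_index.
--   SELECT account_tx('\x<20-byte account id in hex>'::bytea, FALSE, 10);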
1038 
1039 -- Trigger prior to insert on ledgers table. Validates length of hash fields.
1040 -- Verifies ancestry based on ledger_hash & prev_hash as follows:
1041 -- 1) If ledgers is empty, allows insert.
1042 -- 2) For each new row, check for previous and later ledgers by a single
1043 -- sequence. For each that exist, confirm ancestry based on hashes.
1044 -- 3) Disallow inserts with no prior or next ledger by sequence if any
1045 -- ledgers currently exist. This prevents gaps from being introduced by
1046 -- way of inserting.
1047 CREATE OR REPLACE FUNCTION insert_ancestry() RETURNS TRIGGER AS $$
1048 DECLARE
1049  _parent bytea;
1050  _child bytea;
1051 BEGIN
1052  IF length(NEW.ledger_hash) != 32 OR length(NEW.prev_hash) != 32 THEN
1053  RAISE 'ledger_hash and prev_hash must each be 32 bytes: %', NEW;
1054  END IF;
1055 
1056  IF (SELECT ledger_hash
1057  FROM ledgers
1058  ORDER BY ledger_seq DESC
1059  LIMIT 1) = NEW.prev_hash THEN RETURN NEW; END IF;
1060 
1061  IF NOT EXISTS (SELECT 1 FROM LEDGERS) THEN RETURN NEW; END IF;
1062 
1063  _parent := (SELECT ledger_hash
1064  FROM ledgers
1065  WHERE ledger_seq = NEW.ledger_seq - 1);
1066  _child := (SELECT prev_hash
1067  FROM ledgers
1068  WHERE ledger_seq = NEW.ledger_seq + 1);
1069  IF _parent IS NULL AND _child IS NULL THEN
1070  RAISE 'Ledger Ancestry error: orphan.';
1071  END IF;
1072  IF _parent != NEW.prev_hash THEN
1073  RAISE 'Ledger Ancestry error: bad parent.';
1074  END IF;
1075  IF _child != NEW.ledger_hash THEN
1076  RAISE 'Ledger Ancestry error: bad child.';
1077  END IF;
1078 
1079  RETURN NEW;
1080 END;
1081 $$ LANGUAGE plpgsql;
1082 
1083 -- Trigger function prior to delete on ledgers table. Disallow gaps from
1084 -- forming. Do not allow deletions if both the previous and next ledgers
1085 -- are present. In other words, only allow either the least or greatest
1086 -- to be deleted.
1087 CREATE OR REPLACE FUNCTION delete_ancestry () RETURNS TRIGGER AS $$
1088 BEGIN
1089  IF EXISTS (SELECT 1
1090  FROM ledgers
1091  WHERE ledger_seq = OLD.ledger_seq + 1)
1092  AND EXISTS (SELECT 1
1093  FROM ledgers
1094  WHERE ledger_seq = OLD.ledger_seq - 1) THEN
1095  RAISE 'Ledger Ancestry error: Can only delete the least or greatest '
1096  'ledger.';
1097  END IF;
1098  RETURN OLD;
1099 END;
1100 $$ LANGUAGE plpgsql;
1101 
1102 -- Track the minimum sequence that should be used for ranged queries
1103 -- with protection against deletion during the query. This should
1104 -- be updated before calling online_delete() to not block deleting that
1105 -- range.
1106 CREATE TABLE IF NOT EXISTS min_seq (
1107  ledger_seq bigint NOT NULL
1108 );
1109 
1110 -- Set the minimum sequence for use in ranged queries with protection
1111 -- against deletion greater than or equal to the input parameter. This
1112 -- should be called prior to online_delete() with the same parameter
1113 -- value so that online_delete() is not blocked by range queries
1114 -- that are protected against concurrent deletion of the ledger at
1115 -- the bottom of the range. This function needs to be called from a
1116 -- separate transaction from that which executes online_delete().
1117 CREATE OR REPLACE FUNCTION prepare_delete (
1118  _in_last_rotated bigint
1119 ) RETURNS void AS $$
1120 BEGIN
1121  IF EXISTS (SELECT 1 FROM min_seq) THEN
1122  DELETE FROM min_seq;
1123  END IF;
1124  INSERT INTO min_seq VALUES (_in_last_rotated + 1);
1125 END;
1126 $$ LANGUAGE plpgsql;
1127 
1128 -- Function to delete old data. All data belonging to ledgers prior to and
1129 -- equal to the _in_seq parameter will be deleted. This should be
1130 -- called with the input parameter equivalent to the value of lastRotated
1131 -- in rippled's online_delete routine.
1132 CREATE OR REPLACE FUNCTION online_delete (
1133  _in_seq bigint
1134 ) RETURNS void AS $$
1135 BEGIN
1136  DELETE FROM LEDGERS WHERE ledger_seq <= _in_seq;
1137 END;
1138 $$ LANGUAGE plpgsql;
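
-- Example sequence (illustrative value), each statement in its own
-- transaction as described above:
--   SELECT prepare_delete(5000000);
--   SELECT online_delete(5000000);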
1139 
1140 -- Function to delete data from the top of the ledger range. Delete
1141 -- everything greater than the input parameter.
1142 -- It doesn't do a normal range delete because of the trigger protecting
1143 -- deletions causing gaps. Instead, it walks back from the greatest ledger.
1144 CREATE OR REPLACE FUNCTION delete_above (
1145  _in_seq bigint
1146 ) RETURNS void AS $$
1147 DECLARE
1148  _max_seq bigint := max_ledger();
1149  _i bigint := _max_seq;
1150 BEGIN
1151  IF _max_seq IS NULL THEN RETURN; END IF;
1152  LOOP
1153  IF _i <= _in_seq THEN RETURN; END IF;
1154  EXECUTE 'DELETE FROM ledgers WHERE ledger_seq = $1' USING _i;
1155  _i := _i - 1;
1156  END LOOP;
1157 END;
1158 $$ LANGUAGE plpgsql;
1159 
1160 -- Verify correct ancestry of ledgers in database:
1161 -- Table to persist last-confirmed latest ledger with proper ancestry.
1162 CREATE TABLE IF NOT EXISTS ancestry_verified (
1163  ledger_seq bigint NOT NULL
1164 );
1165 
1166 -- Function to verify ancestry of ledgers based on ledger_hash and prev_hash.
1167 -- Upon failure, returns ledger sequence failing ancestry check.
1168 -- Otherwise, returns NULL.
1169 -- _in_full: If TRUE, verify entire table. Else verify starting from
1170 -- value in ancestry_verified table. If no value, then start
1171 -- from lowest ledger.
1172 -- _in_persist: If TRUE, persist the latest ledger with correct ancestry.
1173 -- If an exception was raised because of failure, persist
1174 -- the latest ledger prior to that which failed.
1175 -- _in_min: If set and _in_full is not true, the starting ledger from which
1176 -- to verify.
1177 -- _in_max: If set and _in_full is not true, the latest ledger to verify.
1178 CREATE OR REPLACE FUNCTION check_ancestry (
1179  _in_full bool = FALSE,
1180  _in_persist bool = TRUE,
1181  _in_min bigint = NULL,
1182  _in_max bigint = NULL
1183 ) RETURNS bigint AS $$
1184 DECLARE
1185  _min bigint;
1186  _max bigint;
1187  _last_verified bigint;
1188  _parent ledgers;
1189  _current ledgers;
1190  _cursor refcursor;
1191 BEGIN
1192  IF _in_full IS TRUE AND
1193  (_in_min IS NOT NULL) OR (_in_max IS NOT NULL) THEN
1194  RAISE 'Cannot specify manual range and do full check.';
1195  END IF;
1196 
1197  IF _in_min IS NOT NULL THEN
1198  _min := _in_min;
1199  ELSIF _in_full IS NOT TRUE THEN
1200  _last_verified := (SELECT ledger_seq FROM ancestry_verified);
1201  IF _last_verified IS NULL THEN
1202  _min := min_ledger();
1203  ELSE
1204  _min := _last_verified + 1;
1205  END IF;
1206  ELSE
1207  _min := min_ledger();
1208  END IF;
1209  EXECUTE 'SELECT * FROM ledgers WHERE ledger_seq = $1'
1210  INTO _parent USING _min - 1;
1211  IF _last_verified IS NOT NULL AND _parent IS NULL THEN
1212  RAISE 'Verified ledger % doesn''t exist.', _last_verified;
1213  END IF;
1214 
1215  IF _in_max IS NOT NULL THEN
1216  _max := _in_max;
1217  ELSE
1218  _max := max_ledger();
1219  END IF;
1220 
1221  OPEN _cursor FOR EXECUTE 'SELECT *
1222  FROM ledgers
1223  WHERE ledger_seq BETWEEN $1 AND $2
1224  ORDER BY ledger_seq ASC'
1225  USING _min, _max;
1226  LOOP
1227  FETCH _cursor INTO _current;
1228  IF _current IS NULL THEN EXIT; END IF;
1229  IF _parent IS NOT NULL THEN
1230  IF _current.prev_hash != _parent.ledger_hash THEN
1231  CLOSE _cursor;
1232  RETURN _current.ledger_seq;
1233  RAISE 'Ledger ancestry failure current, parent:% %',
1234  _current, _parent;
1235  END IF;
1236  END IF;
1237  _parent := _current;
1238  END LOOP;
1239  CLOSE _cursor;
1240 
1241  IF _in_persist IS TRUE AND _parent IS NOT NULL THEN
1242  DELETE FROM ancestry_verified;
1243  INSERT INTO ancestry_verified VALUES (_parent.ledger_seq);
1244  END IF;
1245 
1246  RETURN NULL;
1247 END;
1248 $$ LANGUAGE plpgsql;
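
-- Example calls (illustrative): incremental check from the last verified
-- ledger, persisting progress; or a full re-check without persisting.
--   SELECT check_ancestry();
--   SELECT check_ancestry(TRUE, FALSE);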
1249 
1250 -- Return number of whole seconds since the latest ledger was inserted, based
1251 -- on ledger close time (not wall clock) of the insert.
1252 -- Note that ledgers.closing_time is number of seconds since the XRP
1253 -- epoch, which is 01/01/2000 00:00:00. This in turn is 946684800 seconds
1254 -- after the UNIX epoch. This conforms to the "age" field in the
1255 -- server_info RPC call.
1256 CREATE OR REPLACE FUNCTION age () RETURNS bigint AS $$
1257 BEGIN
1258  RETURN (EXTRACT(EPOCH FROM (now())) -
1259  (946684800 + (SELECT closing_time
1260  FROM ledgers
1261  ORDER BY ledger_seq DESC
1262  LIMIT 1)))::bigint;
1263 END;
1264 $$ LANGUAGE plpgsql;
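
-- Worked example: a closing_time of 0 is the XRP epoch (2000-01-01 00:00:00
-- UTC), i.e. UNIX time 946684800, so age() would return the current UNIX
-- time minus 946684800 in that case.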
1265 
1266 -- Return range of ledgers, or empty if none. This conforms to the
1267 -- "complete_ledgers" field of the server_info RPC call. Note
1268 -- that ledger gaps are prevented for reporting mode so the range
1269 -- is simply the set between the least and greatest ledgers.
1270 CREATE OR REPLACE FUNCTION complete_ledgers () RETURNS text AS $$
1271 DECLARE
1272  _min bigint := min_ledger();
1273  _max bigint := max_ledger();
1274 BEGIN
1275  IF _min IS NULL THEN RETURN 'empty'; END IF;
1276  IF _min = _max THEN RETURN _min; END IF;
1277  RETURN _min || '-' || _max;
1278 END;
1279 $$ LANGUAGE plpgsql;
1280 
1281 )"
1282 
1283  // version 2:
1284  // , R"(Full idempotent text of schema version 2)"
1285 
1286  // version 3:
1287  // , R"(Full idempotent text of schema version 3)"
1288 
1289  // version 4:
1290  // , R"(Full idempotent text of schema version 4)"
1291 
1292  // ...
1293 
1294  // version n:
1295  // , R"(Full idempotent text of schema version n)"
1296 };
1297 
1298 std::array<char const*, LATEST_SCHEMA_VERSION> upgrade_schemata = {
1299  // upgrade from version 0:
1300  "There is no upgrade path from version 0. Instead, install "
1301  "from full_schemata."
1302  // upgrade from version 1 to 2:
1303  //, R"(Text to idempotently upgrade from version 1 to 2)"
1304  // upgrade from version 2 to 3:
1305  //, R"(Text to idempotently upgrade from version 2 to 3)"
1306  // upgrade from version 3 to 4:
1307  //, R"(Text to idempotently upgrade from version 3 to 4)"
1308  // ...
1309  // upgrade from version n-1 to n:
1310  //, R"(Text to idempotently upgrade from version n-1 to n)"
1311 };
1312 
1326 void
1327 applySchema(
1328  std::shared_ptr<PgPool> const& pool,
1329  char const* schema,
1330  std::uint32_t currentVersion,
1331  std::uint32_t schemaVersion)
1332 {
1333  if (currentVersion != 0 && schemaVersion != currentVersion + 1)
1334  {
1335  assert(false);
1336  std::stringstream ss;
1337  ss << "Schema upgrade versions past initial deployment must increase "
1338  "monotonically. Versions: current, target: "
1339  << currentVersion << ", " << schemaVersion;
1340  Throw<std::runtime_error>(ss.str());
1341  }
1342 
1343  auto res = PgQuery(pool)({schema, {}});
1344  if (!res)
1345  {
1346  std::stringstream ss;
1347  ss << "Error applying schema from version " << currentVersion << " to "
1348  << schemaVersion << ": " << res.msg();
1349  Throw<std::runtime_error>(ss.str());
1350  }
1351 
1352  auto cmd = boost::format(R"(SELECT set_schema_version(%u, 0))");
1353  res = PgQuery(pool)({boost::str(cmd % schemaVersion).c_str(), {}});
1354  if (!res)
1355  {
1356  std::stringstream ss;
1357  ss << "Error setting schema version from " << currentVersion << " to "
1358  << schemaVersion << ": " << res.msg();
1359  Throw<std::runtime_error>(ss.str());
1360  }
1361 }
1362 
1363 void
1364 initSchema(std::shared_ptr<PgPool> const& pool)
1365 {
1366  // Figure out what schema version, if any, is already installed.
1367  auto res = PgQuery(pool)({version_query, {}});
1368  if (!res)
1369  {
1370  std::stringstream ss;
1371  ss << "Error getting database schema version: " << res.msg();
1372  Throw<std::runtime_error>(ss.str());
1373  }
1374  std::uint32_t currentSchemaVersion = res.asInt();
1375  std::uint32_t const pendingSchemaVersion = res.asInt(0, 1);
1376 
1377  // Nothing to do if we are on the latest schema.
1378  if (currentSchemaVersion == LATEST_SCHEMA_VERSION)
1379  return;
1380 
1381  if (currentSchemaVersion == 0)
1382  {
1383  // If a fresh install has not been completed, then re-attempt
1384  // the install of the same schema version.
1385  std::uint32_t const freshVersion =
1386  pendingSchemaVersion ? pendingSchemaVersion : LATEST_SCHEMA_VERSION;
1387  // Persist that we are attempting a fresh install to the latest version.
1388  // This protects against corruption in an aborted install that is
1389  // followed by a fresh installation attempt with a new schema.
1390  auto cmd = boost::format(R"(SELECT set_schema_version(0, %u))");
1391  res = PgQuery(pool)({boost::str(cmd % freshVersion).c_str(), {}});
1392  if (!res)
1393  {
1394  std::stringstream ss;
1395  ss << "Error setting schema version from " << currentSchemaVersion
1396  << " to " << freshVersion << ": " << res.msg();
1397  Throw<std::runtime_error>(ss.str());
1398  }
1399 
1400  // Install the full latest schema.
1401  applySchema(
1402  pool,
1403  full_schemata[freshVersion],
1404  currentSchemaVersion,
1405  freshVersion);
1406  currentSchemaVersion = freshVersion;
1407  }
1408 
1409  // Incrementally upgrade one version at a time until latest.
1410  for (; currentSchemaVersion < LATEST_SCHEMA_VERSION; ++currentSchemaVersion)
1411  {
1412  applySchema(
1413  pool,
1414  upgrade_schemata[currentSchemaVersion],
1415  currentSchemaVersion,
1416  currentSchemaVersion + 1);
1417  }
1418 }
1419 
1420 } // namespace ripple
1421 #endif