rippled
Pg.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2020 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 
20 #ifdef RIPPLED_REPORTING
21 // Need raw socket manipulation to determine whether the postgres socket is IPv4 or IPv6.
22 #if defined(_WIN32)
23 #include <winsock2.h>
24 #include <ws2tcpip.h>
25 #else
26 #include <arpa/inet.h>
27 #include <netinet/in.h>
28 #include <sys/socket.h>
29 #include <sys/types.h>
30 #endif
31 
32 #include <ripple/basics/contract.h>
33 #include <ripple/core/Pg.h>
34 #include <boost/asio/ssl/detail/openssl_init.hpp>
35 #include <boost/format.hpp>
36 #include <algorithm>
37 #include <array>
38 #include <cassert>
39 #include <cstdlib>
40 #include <cstring>
41 #include <exception>
42 #include <functional>
43 #include <iterator>
44 #include <sstream>
45 #include <stdexcept>
46 #include <string>
47 #include <string_view>
48 #include <thread>
49 #include <utility>
50 #include <vector>
51 
52 namespace ripple {
53 
54 static void
55 noticeReceiver(void* arg, PGresult const* res)
56 {
57  beast::Journal& j = *static_cast<beast::Journal*>(arg);
58  JLOG(j.info()) << "server message: " << PQresultErrorMessage(res);
59 }
60 
61 //-----------------------------------------------------------------------------
62 
63 std::string
64 PgResult::msg() const
65 {
66  if (error_.has_value())
67  {
68  std::stringstream ss;
69  ss << error_->first << ": " << error_->second;
70  return ss.str();
71  }
72  if (result_)
73  return "ok";
74 
75  // Must be stopping.
76  return "stopping";
77 }
78 
79 //-----------------------------------------------------------------------------
80 
81 /*
82  Connecting described in:
83  https://www.postgresql.org/docs/10/libpq-connect.html
84  */
85 void
86 Pg::connect()
87 {
88  if (conn_)
89  {
90  // Nothing to do if we already have a good connection.
91  if (PQstatus(conn_.get()) == CONNECTION_OK)
92  return;
93  /* Try resetting connection. */
94  PQreset(conn_.get());
95  }
96  else // Make new connection.
97  {
98  conn_.reset(PQconnectdbParams(
99  reinterpret_cast<char const* const*>(&config_.keywordsIdx[0]),
100  reinterpret_cast<char const* const*>(&config_.valuesIdx[0]),
101  0));
102  if (!conn_)
103  Throw<std::runtime_error>("No db connection struct");
104  }
105 
108  if (PQstatus(conn_.get()) == CONNECTION_BAD)
109  {
110  std::stringstream ss;
111  ss << "DB connection status " << PQstatus(conn_.get()) << ": "
112  << PQerrorMessage(conn_.get());
113  Throw<std::runtime_error>(ss.str());
114  }
115 
116  // Log server session console messages.
117  PQsetNoticeReceiver(
118  conn_.get(), noticeReceiver, const_cast<beast::Journal*>(&j_));
119 }
120 
121 PgResult
122 Pg::query(char const* command, std::size_t nParams, char const* const* values)
123 {
124  // The result object must be freed using the libpq API PQclear() call.
125  pg_result_type ret{nullptr, [](PGresult* result) { PQclear(result); }};
126  // Connect then submit query.
127  while (true)
128  {
129  {
130  std::lock_guard<std::mutex> lock(mutex_);
131  if (stop_)
132  return PgResult();
133  }
134  try
135  {
136  connect();
137  if (nParams)
138  {
139  // PQexecParams can process only a single command.
140  ret.reset(PQexecParams(
141  conn_.get(),
142  command,
143  nParams,
144  nullptr,
145  values,
146  nullptr,
147  nullptr,
148  0));
149  }
150  else
151  {
152  // PQexec can process multiple commands separated by
153  // semi-colons. Returns the response from the last
154  // command processed.
155  ret.reset(PQexec(conn_.get(), command));
156  }
157  if (!ret)
158  Throw<std::runtime_error>("no result structure returned");
159  break;
160  }
161  catch (std::exception const& e)
162  {
163  // Sever connection and retry until successful.
164  disconnect();
165  JLOG(j_.error()) << "database error, retrying: " << e.what();
167  }
168  }
169 
170  // Ensure proper query execution.
171  switch (PQresultStatus(ret.get()))
172  {
173  case PGRES_TUPLES_OK:
174  case PGRES_COMMAND_OK:
175  case PGRES_COPY_IN:
176  case PGRES_COPY_OUT:
177  case PGRES_COPY_BOTH:
178  break;
179  default: {
180  std::stringstream ss;
181  ss << "bad query result: " << PQresStatus(PQresultStatus(ret.get()))
182  << " error message: " << PQerrorMessage(conn_.get())
183  << ", number of tuples: " << PQntuples(ret.get())
184  << ", number of fields: " << PQnfields(ret.get());
185  JLOG(j_.error()) << ss.str();
186  PgResult retRes(ret.get(), conn_.get());
187  disconnect();
188  return retRes;
189  }
190  }
191 
192  return PgResult(std::move(ret));
193 }
194 
195 static pg_formatted_params
196 formatParams(pg_params const& dbParams, beast::Journal const& j)
197 {
198  std::vector<std::optional<std::string>> const& values = dbParams.second;
199  /* Convert vector to C-style array of C-strings for postgres API.
200  std::nullopt is a proxy for NULL since an empty std::string is
201  0 length but not NULL. */
202  std::vector<char const*> valuesIdx;
203  valuesIdx.reserve(values.size());
204  std::stringstream ss;
205  bool first = true;
206  for (auto const& value : values)
207  {
208  if (first)
209  first = false;
210  else
211  ss << ',';
212  if (value)
213  {
214  valuesIdx.push_back(value->c_str());
215  ss << value->c_str();
216  }
217  else
218  {
219  valuesIdx.push_back(nullptr);
220  ss << "(null)";
221  }
222  }
223 
224  JLOG(j.trace()) << "query: " << dbParams.first << ". params: " << ss.str();
225  return valuesIdx;
226 }
227 
228 PgResult
229 Pg::query(pg_params const& dbParams)
230 {
231  char const* const& command = dbParams.first;
232  auto const formattedParams = formatParams(dbParams, j_);
233  return query(
234  command,
235  formattedParams.size(),
236  formattedParams.size()
237  ? reinterpret_cast<char const* const*>(&formattedParams[0])
238  : nullptr);
239 }
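// Example usage (illustrative sketch; the query text, parameter value, and
// variable names are hypothetical). A pg_params value pairs the command text
// with optional string parameters, matching how formatParams() consumes
// dbParams.first and dbParams.second above:
//
//   pg_params params{
//       "SELECT ledger_hash FROM ledgers WHERE ledger_seq = $1",
//       {{"1234567"}}};
//   PgResult res = pg->query(params);
//   if (!res)
//       JLOG(j.error()) << "query failed: " << res.msg();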
240 
241 void
242 Pg::bulkInsert(char const* table, std::string const& records)
243 {
244  // https://www.postgresql.org/docs/12/libpq-copy.html#LIBPQ-COPY-SEND
245  assert(conn_.get());
246  static auto copyCmd = boost::format(R"(COPY %s FROM stdin)");
247  auto res = query(boost::str(copyCmd % table).c_str());
248  if (!res || res.status() != PGRES_COPY_IN)
249  {
250  std::stringstream ss;
251  ss << "bulkInsert to " << table
252  << ". Postgres insert error: " << res.msg();
253  if (res)
254  ss << ". Query status not PGRES_COPY_IN: " << res.status();
255  Throw<std::runtime_error>(ss.str());
256  }
257 
258  if (PQputCopyData(conn_.get(), records.c_str(), records.size()) == -1)
259  {
260  std::stringstream ss;
261  ss << "bulkInsert to " << table
262  << ". PQputCopyData error: " << PQerrorMessage(conn_.get());
263  disconnect();
264  Throw<std::runtime_error>(ss.str());
265  }
266 
267  if (PQputCopyEnd(conn_.get(), nullptr) == -1)
268  {
269  std::stringstream ss;
270  ss << "bulkInsert to " << table
271  << ". PQputCopyEnd error: " << PQerrorMessage(conn_.get());
272  disconnect();
273  Throw<std::runtime_error>(ss.str());
274  }
275 
276  // The result object must be freed using the libpq API PQclear() call.
277  pg_result_type copyEndResult{
278  nullptr, [](PGresult* result) { PQclear(result); }};
279  copyEndResult.reset(PQgetResult(conn_.get()));
280  ExecStatusType status = PQresultStatus(copyEndResult.get());
281  if (status != PGRES_COMMAND_OK)
282  {
283  std::stringstream ss;
284  ss << "bulkInsert to " << table
285  << ". PQputCopyEnd status not PGRES_COMMAND_OK: " << status;
286  disconnect();
287  Throw<std::runtime_error>(ss.str());
288  }
289 }
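// Example usage (illustrative; the table name and values are hypothetical).
// bulkInsert() drives COPY <table> FROM stdin, so `records` is expected in
// PostgreSQL's text COPY format: one row per newline-terminated line,
// tab-separated columns, \N for NULL:
//
//   std::string rows =
//       "5\t100\n"
//       "6\t101\n";
//   pg->bulkInsert("my_table", rows);  // hypothetical two-column table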
290 
291 bool
292 Pg::clear()
293 {
294  if (!conn_)
295  return false;
296 
297  // The result object must be freed using the libpq API PQclear() call.
298  pg_result_type res{nullptr, [](PGresult* result) { PQclear(result); }};
299 
300  // Consume results until no more, or until the connection is severed.
301  do
302  {
303  res.reset(PQgetResult(conn_.get()));
304  if (!res)
305  break;
306 
307  // Pending bulk copy operations may leave the connection in such a
308  // state that it must be disconnected.
309  switch (PQresultStatus(res.get()))
310  {
311  case PGRES_COPY_IN:
312  if (PQputCopyEnd(conn_.get(), nullptr) != -1)
313  break;
314  [[fallthrough]]; // avoids compiler warning
315  case PGRES_COPY_OUT:
316  case PGRES_COPY_BOTH:
317  conn_.reset();
318  default:;
319  }
320  } while (res && conn_);
321 
322  return conn_ != nullptr;
323 }
324 
325 //-----------------------------------------------------------------------------
326 
327 PgPool::PgPool(Section const& pgConfig, beast::Journal j) : j_(j)
328 {
329  // Make sure that boost::asio initializes the SSL library.
330  {
331  static boost::asio::ssl::detail::openssl_init<true> initSsl;
332  }
333  // Don't have postgres client initialize SSL.
334  PQinitOpenSSL(0, 0);
335 
336  /*
337  Connect to postgres to create low level connection parameters
338  with optional caching of network address info for subsequent connections.
339  See https://www.postgresql.org/docs/10/libpq-connect.html
340 
341  For bounds checking of postgres connection data received from
342  the network: the largest size for any connection field in
343  PG source code is 64 bytes as of 5/2019. There are 29 fields.
344  */
345  constexpr std::size_t maxFieldSize = 1024;
346  constexpr std::size_t maxFields = 1000;
347 
348  // The connection object must be freed using the libpq API PQfinish() call.
349  pg_connection_type conn(
350  PQconnectdb(get(pgConfig, "conninfo").c_str()),
351  [](PGconn* conn) { PQfinish(conn); });
352  if (!conn)
353  Throw<std::runtime_error>("Can't create DB connection.");
354  if (PQstatus(conn.get()) != CONNECTION_OK)
355  {
356  std::stringstream ss;
357  ss << "Initial DB connection failed: " << PQerrorMessage(conn.get());
358  Throw<std::runtime_error>(ss.str());
359  }
360 
361  int const sockfd = PQsocket(conn.get());
362  if (sockfd == -1)
363  Throw<std::runtime_error>("No DB socket is open.");
364  struct sockaddr_storage addr;
365  socklen_t len = sizeof(addr);
366  if (getpeername(sockfd, reinterpret_cast<struct sockaddr*>(&addr), &len) ==
367  -1)
368  {
369  Throw<std::system_error>(
370  errno, std::generic_category(), "Can't get server address info.");
371  }
372 
373  // Set "port" and "hostaddr" if we're caching them.
374  bool const remember_ip = get(pgConfig, "remember_ip", true);
375 
376  if (remember_ip)
377  {
378  config_.keywords.push_back("port");
379  config_.keywords.push_back("hostaddr");
380  std::string port;
381  std::string hostaddr;
382 
383  if (addr.ss_family == AF_INET)
384  {
385  hostaddr.assign(INET_ADDRSTRLEN, '\0');
386  struct sockaddr_in const& ainfo =
387  reinterpret_cast<struct sockaddr_in&>(addr);
388  port = std::to_string(ntohs(ainfo.sin_port));
389  if (!inet_ntop(
390  AF_INET, &ainfo.sin_addr, &hostaddr[0], hostaddr.size()))
391  {
392  Throw<std::system_error>(
393  errno,
394  std::generic_category(),
395  "Can't get IPv4 address string.");
396  }
397  }
398  else if (addr.ss_family == AF_INET6)
399  {
400  hostaddr.assign(INET6_ADDRSTRLEN, '\0');
401  struct sockaddr_in6 const& ainfo =
402  reinterpret_cast<struct sockaddr_in6&>(addr);
403  port = std::to_string(ntohs(ainfo.sin6_port));
404  if (!inet_ntop(
405  AF_INET6, &ainfo.sin6_addr, &hostaddr[0], hostaddr.size()))
406  {
407  Throw<std::system_error>(
408  errno,
409  std::generic_category(),
410  "Can't get IPv6 address string.");
411  }
412  }
413 
414  config_.values.push_back(port.c_str());
415  config_.values.push_back(hostaddr.c_str());
416  }
417  std::unique_ptr<PQconninfoOption, void (*)(PQconninfoOption*)> connOptions(
418  PQconninfo(conn.get()),
419  [](PQconninfoOption* opts) { PQconninfoFree(opts); });
420  if (!connOptions)
421  Throw<std::runtime_error>("Can't get DB connection options.");
422 
423  std::size_t nfields = 0;
424  for (PQconninfoOption* option = connOptions.get();
425  option->keyword != nullptr;
426  ++option)
427  {
428  if (++nfields > maxFields)
429  {
430  std::stringstream ss;
431  ss << "DB returned connection options with > " << maxFields
432  << " fields.";
433  Throw<std::runtime_error>(ss.str());
434  }
435 
436  if (!option->val ||
437  (remember_ip &&
438  (!strcmp(option->keyword, "hostaddr") ||
439  !strcmp(option->keyword, "port"))))
440  {
441  continue;
442  }
443 
444  if (strlen(option->keyword) > maxFieldSize ||
445  strlen(option->val) > maxFieldSize)
446  {
447  std::stringstream ss;
448  ss << "DB returned a connection option name or value with\n";
449  ss << "excessive size (>" << maxFieldSize << " bytes).\n";
450  ss << "option (possibly truncated): "
451  << std::string_view(
452  option->keyword,
453  std::min(strlen(option->keyword), maxFieldSize))
454  << '\n';
455  ss << " value (possibly truncated): "
456  << std::string_view(
457  option->val, std::min(strlen(option->val), maxFieldSize));
458  Throw<std::runtime_error>(ss.str());
459  }
460  config_.keywords.push_back(option->keyword);
461  config_.values.push_back(option->val);
462  }
463 
464  config_.keywordsIdx.reserve(config_.keywords.size() + 1);
465  config_.valuesIdx.reserve(config_.values.size() + 1);
466  for (std::size_t n = 0; n < config_.keywords.size(); ++n)
467  {
468  config_.keywordsIdx.push_back(config_.keywords[n].c_str());
469  config_.valuesIdx.push_back(config_.values[n].c_str());
470  }
471  config_.keywordsIdx.push_back(nullptr);
472  config_.valuesIdx.push_back(nullptr);
473 
474  get_if_exists(pgConfig, "max_connections", config_.max_connections);
475  std::size_t timeout;
476  if (get_if_exists(pgConfig, "timeout", timeout))
477  config_.timeout = std::chrono::seconds(timeout);
478 }
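// Example configuration (illustrative sketch; all values are hypothetical).
// The keys below are the ones read from pgConfig above: "conninfo" is a
// standard libpq connection string (keyword/value or URI form), and the
// remaining keys tune the pool:
//
//   conninfo = postgres://rippled:secret@127.0.0.1:5432/reporting
//   remember_ip = true
//   max_connections = 16
//   timeout = 600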
479 
480 void
481 PgPool::setup()
482 {
483  {
484  std::stringstream ss;
485  ss << "max_connections: " << config_.max_connections << ", "
486  << "timeout: " << config_.timeout.count() << ", "
487  << "connection params: ";
488  bool first = true;
489  for (std::size_t i = 0; i < config_.keywords.size(); ++i)
490  {
491  if (first)
492  first = false;
493  else
494  ss << ", ";
495  ss << config_.keywords[i] << ": "
496  << (config_.keywords[i] == "password" ? "*" : config_.values[i]);
497  }
498  JLOG(j_.debug()) << ss.str();
499  }
500 }
501 
502 void
503 PgPool::stop()
504 {
505  std::lock_guard<std::mutex> lock(mutex_);
506  stop_ = true;
507  cond_.notify_all();
508  idle_.clear();
509  JLOG(j_.info()) << "stopped";
510 }
511 
512 void
513 PgPool::idleSweeper()
514 {
515  std::size_t before, after;
516  {
517  std::lock_guard<std::mutex> lock(mutex_);
518  before = idle_.size();
519  if (config_.timeout != std::chrono::seconds(0))
520  {
521  auto const found =
522  idle_.upper_bound(clock_type::now() - config_.timeout);
523  for (auto it = idle_.begin(); it != found;)
524  {
525  it = idle_.erase(it);
526  --connections_;
527  }
528  }
529  after = idle_.size();
530  }
531 
532  JLOG(j_.info()) << "Idle sweeper. connections: " << connections_
533  << ". checked out: " << connections_ - after
534  << ". idle before, after sweep: " << before << ", "
535  << after;
536 }
537 
538 std::unique_ptr<Pg>
539 PgPool::checkout()
540 {
541  std::unique_ptr<Pg> ret;
542  std::unique_lock<std::mutex> lock(mutex_);
543  do
544  {
545  if (stop_)
546  return {};
547 
548  // If there is a connection in the pool, return the most recent.
549  if (idle_.size())
550  {
551  auto entry = idle_.rbegin();
552  ret = std::move(entry->second);
553  idle_.erase(std::next(entry).base());
554  }
555  // Otherwise, return a new connection unless over threshold.
556  else if (connections_ < config_.max_connections)
557  {
558  ++connections_;
559  ret = std::make_unique<Pg>(config_, j_, stop_, mutex_);
560  }
561  // Otherwise, wait until a connection becomes available or we stop.
562  else
563  {
564  JLOG(j_.error()) << "No database connections available.";
565  cond_.wait(lock);
566  }
567  } while (!ret && !stop_);
568  lock.unlock();
569 
570  return ret;
571 }
572 
573 void
574 PgPool::checkin(std::unique_ptr<Pg>& pg)
575 {
576  if (pg)
577  {
578  std::lock_guard<std::mutex> lock(mutex_);
579  if (!stop_ && pg->clear())
580  {
581  idle_.emplace(clock_type::now(), std::move(pg));
582  }
583  else
584  {
585  --connections_;
586  pg.reset();
587  }
588  }
589 
590  cond_.notify_all();
591 }
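// Example usage (illustrative sketch of the checkout/checkin cycle; the
// variable names and query text are hypothetical):
//
//   auto pg = pool->checkout();   // may wait until a connection is available
//   if (pg)
//   {
//       auto res = pg->query("SELECT complete_ledgers()");
//       // ... use res ...
//   }
//   pool->checkin(pg);            // re-pool the connection, or drop it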
592 
593 //-----------------------------------------------------------------------------
594 
595 std::shared_ptr<PgPool>
596 make_PgPool(Section const& pgConfig, beast::Journal j)
597 {
598  auto ret = std::make_shared<PgPool>(pgConfig, j);
599  ret->setup();
600  return ret;
601 }
602 
603 //-----------------------------------------------------------------------------
604 
685 #define LATEST_SCHEMA_VERSION 1
686 
687 char const* version_query = R"(
688 CREATE TABLE IF NOT EXISTS version (version int NOT NULL,
689  fresh_pending int NOT NULL);
690 
691 -- Version 0 means that no schema has been fully deployed.
692 DO $$
693 BEGIN
694  IF NOT EXISTS (SELECT 1 FROM version) THEN
695  INSERT INTO version VALUES (0, 0);
696 END IF;
697 END $$;
698 
699 -- Function to set the schema version. _in_pending should only be set to
700 -- non-zero prior to an attempt to initialize the schema from scratch.
701 -- After successful initialization, this should be set to 0.
702 -- _in_version should be set to the version of schema that has been applied
703 -- once successful application has occurred.
704 CREATE OR REPLACE FUNCTION set_schema_version (
705  _in_version int,
706  _in_pending int
707 ) RETURNS void AS $$
708 DECLARE
709  _current_version int;
710 BEGIN
711  IF _in_version IS NULL OR _in_pending IS NULL THEN RETURN; END IF;
712  IF EXISTS (SELECT 1 FROM version) THEN DELETE FROM version; END IF;
713  INSERT INTO version VALUES (_in_version, _in_pending);
714  RETURN;
715 END;
716 $$ LANGUAGE plpgsql;
717 
718 -- PQexec() returns the output of the last statement in its response.
719 SELECT * FROM version;
720 )";
721 
722 std::array<char const*, LATEST_SCHEMA_VERSION + 1> full_schemata = {
723  // version 0:
724  "There is no such thing as schema version 0."
725 
726  // version 1:
727  ,
728  R"(
729 -- Table to store ledger headers.
730 CREATE TABLE IF NOT EXISTS ledgers (
731  ledger_seq bigint PRIMARY KEY,
732  ledger_hash bytea NOT NULL,
733  prev_hash bytea NOT NULL,
734  total_coins bigint NOT NULL,
735  closing_time bigint NOT NULL,
736  prev_closing_time bigint NOT NULL,
737  close_time_res bigint NOT NULL,
738  close_flags bigint NOT NULL,
739  account_set_hash bytea NOT NULL,
740  trans_set_hash bytea NOT NULL
741 );
742 
743 -- Index for lookups by ledger hash.
744 CREATE INDEX IF NOT EXISTS ledgers_ledger_hash_idx ON ledgers
745  USING hash (ledger_hash);
746 
747 -- Transactions table. Deletes from the ledger table
748 -- cascade here based on ledger_seq.
749 CREATE TABLE IF NOT EXISTS transactions (
750  ledger_seq bigint NOT NULL,
751  transaction_index bigint NOT NULL,
752  trans_id bytea NOT NULL,
753  nodestore_hash bytea NOT NULL,
754  constraint transactions_pkey PRIMARY KEY (ledger_seq, transaction_index),
755  constraint transactions_fkey FOREIGN KEY (ledger_seq)
756  REFERENCES ledgers (ledger_seq) ON DELETE CASCADE
757 );
758 
759 -- Index for lookups by transaction hash.
760 CREATE INDEX IF NOT EXISTS transactions_trans_id_idx ON transactions
761  USING hash (trans_id);
762 
763 -- Table that maps accounts to transactions affecting them. Deletes from the
764 -- ledger table by way of transactions table cascade here based on ledger_seq.
765 CREATE TABLE IF NOT EXISTS account_transactions (
766  account bytea NOT NULL,
767  ledger_seq bigint NOT NULL,
768  transaction_index bigint NOT NULL,
769  constraint account_transactions_pkey PRIMARY KEY (account, ledger_seq,
770  transaction_index),
771  constraint account_transactions_fkey FOREIGN KEY (ledger_seq,
772  transaction_index) REFERENCES transactions (
773  ledger_seq, transaction_index) ON DELETE CASCADE
774 );
775 
776 -- Index to allow for fast cascading deletions and referential integrity.
777 CREATE INDEX IF NOT EXISTS fki_account_transactions_idx ON
778  account_transactions USING btree (ledger_seq, transaction_index);
779 
780 -- Avoid inadvertent administrative tampering with committed data.
781 CREATE OR REPLACE RULE ledgers_update_protect AS ON UPDATE TO
782  ledgers DO INSTEAD NOTHING;
783 CREATE OR REPLACE RULE transactions_update_protect AS ON UPDATE TO
784  transactions DO INSTEAD NOTHING;
785 CREATE OR REPLACE RULE account_transactions_update_protect AS ON UPDATE TO
786  account_transactions DO INSTEAD NOTHING;
787 
788 -- Stored procedure to assist with the tx() RPC call. Takes transaction hash
789 -- as input. If found, returns the ledger sequence in which it was applied.
790 -- If not, returns the range of ledgers searched.
791 CREATE OR REPLACE FUNCTION tx (
792  _in_trans_id bytea
793 ) RETURNS jsonb AS $$
794 DECLARE
795  _min_ledger bigint := min_ledger();
796  _min_seq bigint := (SELECT ledger_seq
797  FROM ledgers
798  WHERE ledger_seq = _min_ledger
799  FOR SHARE);
800  _max_seq bigint := max_ledger();
801  _ledger_seq bigint;
802  _nodestore_hash bytea;
803 BEGIN
804 
805  IF _min_seq IS NULL THEN
806  RETURN jsonb_build_object('error', 'empty database');
807  END IF;
808  IF length(_in_trans_id) != 32 THEN
809  RETURN jsonb_build_object('error', '_in_trans_id size: '
810  || to_char(length(_in_trans_id), '999'));
811  END IF;
812 
813  EXECUTE 'SELECT nodestore_hash, ledger_seq
814  FROM transactions
815  WHERE trans_id = $1
816  AND ledger_seq BETWEEN $2 AND $3
817  ' INTO _nodestore_hash, _ledger_seq USING _in_trans_id, _min_seq, _max_seq;
818  IF _nodestore_hash IS NULL THEN
819  RETURN jsonb_build_object('min_seq', _min_seq, 'max_seq', _max_seq);
820  END IF;
821  RETURN jsonb_build_object('nodestore_hash', _nodestore_hash, 'ledger_seq',
822  _ledger_seq);
823 END;
824 $$ LANGUAGE plpgsql;
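-- Example (illustrative; the argument is a placeholder 32-byte hash):
--   SELECT tx('\x0123...cdef');
-- When found, returns {"nodestore_hash": ..., "ledger_seq": ...}; otherwise
-- returns {"min_seq": ..., "max_seq": ...} describing the range searched.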
825 
826 -- Return the earliest ledger sequence intended for range operations
827 -- that protect the bottom of the range from deletion. Return NULL if empty.
828 CREATE OR REPLACE FUNCTION min_ledger () RETURNS bigint AS $$
829 DECLARE
830  _min_seq bigint := (SELECT ledger_seq from min_seq);
831 BEGIN
832  IF _min_seq IS NULL THEN
833  RETURN (SELECT ledger_seq FROM ledgers ORDER BY ledger_seq ASC LIMIT 1);
834  ELSE
835  RETURN _min_seq;
836  END IF;
837 END;
838 $$ LANGUAGE plpgsql;
839 
840 -- Return the latest ledger sequence in the database, or NULL if empty.
841 CREATE OR REPLACE FUNCTION max_ledger () RETURNS bigint AS $$
842 BEGIN
843  RETURN (SELECT ledger_seq FROM ledgers ORDER BY ledger_seq DESC LIMIT 1);
844 END;
845 $$ LANGUAGE plpgsql;
846 
847 -- account_tx() RPC helper. From the rippled reporting process, only the
848 -- parameters without defaults are required. For the parameters with
849 -- defaults, validation should be done by rippled, such as:
850 -- _in_account_id should be a valid xrp base58 address.
851 -- _in_forward either true or false according to the published api
852 -- _in_limit should be validated and not simply passed through from
853 -- client.
854 --
855 -- For _in_ledger_index_min and _in_ledger_index_max, if passed in the
856 -- request, verify that their type is int and pass through as is.
857 -- For _ledger_hash, verify and convert from hex length 32 bytes and
858 -- prepend with \x (\\x C++).
859 --
860 -- For _in_ledger_index, if the input type is integer, then pass through
861 -- as is. If the type is string and contents = validated, then do not
862 -- set _in_ledger_index. Instead set _in_validated to TRUE.
863 --
864 -- There is no need for rippled to do any type of lookup on max/min
865 -- ledger range, lookup of hash, or the like. This functions does those
866 -- things, including error responses if bad input. Only the above must
867 -- be done to set the correct search range.
868 --
869 -- If a marker is present in the request, verify the members 'ledger'
870 -- and 'seq' are integers and they correspond to _in_marker_seq
871 -- _in_marker_index.
872 -- To reiterate:
873 -- JSON input field 'ledger' corresponds to _in_marker_seq
874 -- JSON input field 'seq' corresponds to _in_marker_index
875 CREATE OR REPLACE FUNCTION account_tx (
876  _in_account_id bytea,
877  _in_forward bool,
878  _in_limit bigint,
879  _in_ledger_index_min bigint = NULL,
880  _in_ledger_index_max bigint = NULL,
881  _in_ledger_hash bytea = NULL,
882  _in_ledger_index bigint = NULL,
883  _in_validated bool = NULL,
884  _in_marker_seq bigint = NULL,
885  _in_marker_index bigint = NULL
886 ) RETURNS jsonb AS $$
887 DECLARE
888  _min bigint;
889  _max bigint;
890  _sort_order text := (SELECT CASE WHEN _in_forward IS TRUE THEN
891  'ASC' ELSE 'DESC' END);
892  _marker bool;
893  _between_min bigint;
894  _between_max bigint;
895  _sql text;
896  _cursor refcursor;
897  _result jsonb;
898  _record record;
899  _tally bigint := 0;
900  _ret_marker jsonb;
901  _transactions jsonb[] := '{}';
902 BEGIN
903  IF _in_ledger_index_min IS NOT NULL OR
904  _in_ledger_index_max IS NOT NULL THEN
905  _min := (SELECT CASE WHEN _in_ledger_index_min IS NULL
906  THEN min_ledger() ELSE greatest(
907  _in_ledger_index_min, min_ledger()) END);
908  _max := (SELECT CASE WHEN _in_ledger_index_max IS NULL OR
909  _in_ledger_index_max = -1 THEN max_ledger() ELSE
910  least(_in_ledger_index_max, max_ledger()) END);
911 
912  IF _max < _min THEN
913  RETURN jsonb_build_object('error', 'max is less than min ledger');
914  END IF;
915 
916  ELSIF _in_ledger_hash IS NOT NULL OR _in_ledger_index IS NOT NULL
917  OR _in_validated IS TRUE THEN
918  IF _in_ledger_hash IS NOT NULL THEN
919  IF length(_in_ledger_hash) != 32 THEN
920  RETURN jsonb_build_object('error', '_in_ledger_hash size: '
921  || to_char(length(_in_ledger_hash), '999'));
922  END IF;
923  EXECUTE 'SELECT ledger_seq
924  FROM ledgers
925  WHERE ledger_hash = $1'
926  INTO _min USING _in_ledger_hash::bytea;
927  ELSE
928  IF _in_ledger_index IS NOT NULL AND _in_validated IS TRUE THEN
929  RETURN jsonb_build_object('error',
930  '_in_ledger_index cannot be set and _in_validated true');
931  END IF;
932  IF _in_validated IS TRUE THEN
933  _in_ledger_index := max_ledger();
934  END IF;
935  _min := (SELECT ledger_seq
936  FROM ledgers
937  WHERE ledger_seq = _in_ledger_index);
938  END IF;
939  IF _min IS NULL THEN
940  RETURN jsonb_build_object('error', 'ledger not found');
941  END IF;
942  _max := _min;
943  ELSE
944  _min := min_ledger();
945  _max := max_ledger();
946  END IF;
947 
948  IF _in_marker_seq IS NOT NULL OR _in_marker_index IS NOT NULL THEN
949  _marker := TRUE;
950  IF _in_marker_seq IS NULL OR _in_marker_index IS NULL THEN
951  -- The rippled implementation returns no transaction results
952  -- if either of these values are missing.
953  _between_min := 0;
954  _between_max := 0;
955  ELSE
956  IF _in_forward IS TRUE THEN
957  _between_min := _in_marker_seq;
958  _between_max := _max;
959  ELSE
960  _between_min := _min;
961  _between_max := _in_marker_seq;
962  END IF;
963  END IF;
964  ELSE
965  _marker := FALSE;
966  _between_min := _min;
967  _between_max := _max;
968  END IF;
969  IF _between_max < _between_min THEN
970  RETURN jsonb_build_object('error', 'ledger search range is '
971  || to_char(_between_min, '999') || '-'
972  || to_char(_between_max, '999'));
973  END IF;
974 
975  _sql := format('
976  SELECT transactions.ledger_seq, transactions.transaction_index,
977  transactions.trans_id, transactions.nodestore_hash
978  FROM transactions
979  INNER JOIN account_transactions
980  ON transactions.ledger_seq =
981  account_transactions.ledger_seq
982  AND transactions.transaction_index =
983  account_transactions.transaction_index
984  WHERE account_transactions.account = $1
985  AND account_transactions.ledger_seq BETWEEN $2 AND $3
986  ORDER BY transactions.ledger_seq %s, transactions.transaction_index %s
987  ', _sort_order, _sort_order);
988 
989  OPEN _cursor FOR EXECUTE _sql USING _in_account_id, _between_min,
990  _between_max;
991  LOOP
992  FETCH _cursor INTO _record;
993  IF _record IS NULL THEN EXIT; END IF;
994  IF _marker IS TRUE THEN
995  IF _in_marker_seq = _record.ledger_seq THEN
996  IF _in_forward IS TRUE THEN
997  IF _in_marker_index > _record.transaction_index THEN
998  CONTINUE;
999  END IF;
1000  ELSE
1001  IF _in_marker_index < _record.transaction_index THEN
1002  CONTINUE;
1003  END IF;
1004  END IF;
1005  END IF;
1006  _marker := FALSE;
1007  END IF;
1008 
1009  _tally := _tally + 1;
1010  IF _tally > _in_limit THEN
1011  _ret_marker := jsonb_build_object(
1012  'ledger', _record.ledger_seq,
1013  'seq', _record.transaction_index);
1014  EXIT;
1015  END IF;
1016 
1017  -- Is the transaction index in the tx object?
1018  _transactions := _transactions || jsonb_build_object(
1019  'ledger_seq', _record.ledger_seq,
1020  'transaction_index', _record.transaction_index,
1021  'trans_id', _record.trans_id,
1022  'nodestore_hash', _record.nodestore_hash);
1023 
1024  END LOOP;
1025  CLOSE _cursor;
1026 
1027  _result := jsonb_build_object('ledger_index_min', _min,
1028  'ledger_index_max', _max,
1029  'transactions', _transactions);
1030  IF _ret_marker IS NOT NULL THEN
1031  _result := _result || jsonb_build_object('marker', _ret_marker);
1032  END IF;
1033  RETURN _result;
1034 END;
1035 $$ LANGUAGE plpgsql;
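-- Example (illustrative; the account ID and limit are placeholders):
--   SELECT account_tx('\x<20-byte account ID>', _in_forward := FALSE,
--       _in_limit := 20, _in_validated := TRUE);
-- Returns ledger_index_min, ledger_index_max, a "transactions" array, and a
-- "marker" object when more rows matched than _in_limit allows.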
1036 
1037 -- Trigger prior to insert on ledgers table. Validates length of hash fields.
1038 -- Verifies ancestry based on ledger_hash & prev_hash as follows:
1039 -- 1) If ledgers is empty, allows insert.
1040 -- 2) For each new row, check for ledgers one sequence before and after it.
1041 -- For each that exists, confirm ancestry based on hashes.
1042 -- 3) Disallow inserts that have neither a prior nor a next ledger by sequence
1043 -- if any ledgers currently exist. This prevents gaps from being introduced
1044 -- by inserts.
1045 CREATE OR REPLACE FUNCTION insert_ancestry() RETURNS TRIGGER AS $$
1046 DECLARE
1047  _parent bytea;
1048  _child bytea;
1049 BEGIN
1050  IF length(NEW.ledger_hash) != 32 OR length(NEW.prev_hash) != 32 THEN
1051  RAISE 'ledger_hash and prev_hash must each be 32 bytes: %', NEW;
1052  END IF;
1053 
1054  IF (SELECT ledger_hash
1055  FROM ledgers
1056  ORDER BY ledger_seq DESC
1057  LIMIT 1) = NEW.prev_hash THEN RETURN NEW; END IF;
1058 
1059  IF NOT EXISTS (SELECT 1 FROM LEDGERS) THEN RETURN NEW; END IF;
1060 
1061  _parent := (SELECT ledger_hash
1062  FROM ledgers
1063  WHERE ledger_seq = NEW.ledger_seq - 1);
1064  _child := (SELECT prev_hash
1065  FROM ledgers
1066  WHERE ledger_seq = NEW.ledger_seq + 1);
1067  IF _parent IS NULL AND _child IS NULL THEN
1068  RAISE 'Ledger Ancestry error: orphan.';
1069  END IF;
1070  IF _parent != NEW.prev_hash THEN
1071  RAISE 'Ledger Ancestry error: bad parent.';
1072  END IF;
1073  IF _child != NEW.ledger_hash THEN
1074  RAISE 'Ledger Ancestry error: bad child.';
1075  END IF;
1076 
1077  RETURN NEW;
1078 END;
1079 $$ LANGUAGE plpgsql;
1080 
1081 -- Trigger function prior to delete on ledgers table. Disallow gaps from
1082 -- forming. Do not allow deletions if both the previous and next ledgers
1083 -- are present. In other words, only allow either the least or greatest
1084 -- to be deleted.
1085 CREATE OR REPLACE FUNCTION delete_ancestry () RETURNS TRIGGER AS $$
1086 BEGIN
1087  IF EXISTS (SELECT 1
1088  FROM ledgers
1089  WHERE ledger_seq = OLD.ledger_seq + 1)
1090  AND EXISTS (SELECT 1
1091  FROM ledgers
1092  WHERE ledger_seq = OLD.ledger_seq - 1) THEN
1093  RAISE 'Ledger Ancestry error: Can only delete the least or greatest '
1094  'ledger.';
1095  END IF;
1096  RETURN OLD;
1097 END;
1098 $$ LANGUAGE plpgsql;
1099 
1100 -- Track the minimum sequence that should be used for ranged queries
1101 -- with protection against deletion during the query. This should
1102 -- be updated before calling online_delete() to not block deleting that
1103 -- range.
1104 CREATE TABLE IF NOT EXISTS min_seq (
1105  ledger_seq bigint NOT NULL
1106 );
1107 
1108 -- Set the minimum sequence for use in ranged queries with protection
1109 -- against deletion greater than or equal to the input parameter. This
1110 -- should be called prior to online_delete() with the same parameter
1111 -- value so that online_delete() is not blocked by range queries
1112 -- that are protected against concurrent deletion of the ledger at
1113 -- the bottom of the range. This function needs to be called from a
1114 -- separate transaction from that which executes online_delete().
1115 CREATE OR REPLACE FUNCTION prepare_delete (
1116  _in_last_rotated bigint
1117 ) RETURNS void AS $$
1118 BEGIN
1119  IF EXISTS (SELECT 1 FROM min_seq) THEN
1120  DELETE FROM min_seq;
1121  END IF;
1122  INSERT INTO min_seq VALUES (_in_last_rotated + 1);
1123 END;
1124 $$ LANGUAGE plpgsql;
1125 
1126 -- Function to delete old data. All data belonging to ledgers prior to and
1127 -- equal to the _in_seq parameter will be deleted. This should be
1128 -- called with the input parameter equivalent to the value of lastRotated
1129 -- in rippled's online_delete routine.
1130 CREATE OR REPLACE FUNCTION online_delete (
1131  _in_seq bigint
1132 ) RETURNS void AS $$
1133 BEGIN
1134  DELETE FROM LEDGERS WHERE ledger_seq <= _in_seq;
1135 END;
1136 $$ LANGUAGE plpgsql;
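-- Example (illustrative; the sequence value is a placeholder). As described
-- above, prepare_delete() and online_delete() take the same lastRotated value
-- and run in separate transactions:
--   SELECT prepare_delete(5000000);
--   -- commit, then in a new transaction:
--   SELECT online_delete(5000000);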
1137 
1138 -- Function to delete data from the top of the ledger range. Delete
1139 -- everything greater than the input parameter.
1140 -- It doesn't use a normal range delete because of the trigger that protects
1141 -- against deletions causing gaps. Instead, it walks back from the greatest ledger.
1142 CREATE OR REPLACE FUNCTION delete_above (
1143  _in_seq bigint
1144 ) RETURNS void AS $$
1145 DECLARE
1146  _max_seq bigint := max_ledger();
1147  _i bigint := _max_seq;
1148 BEGIN
1149  IF _max_seq IS NULL THEN RETURN; END IF;
1150  LOOP
1151  IF _i <= _in_seq THEN RETURN; END IF;
1152  EXECUTE 'DELETE FROM ledgers WHERE ledger_seq = $1' USING _i;
1153  _i := _i - 1;
1154  END LOOP;
1155 END;
1156 $$ LANGUAGE plpgsql;
1157 
1158 -- Verify correct ancestry of ledgers in database:
1159 -- Table to persist last-confirmed latest ledger with proper ancestry.
1160 CREATE TABLE IF NOT EXISTS ancestry_verified (
1161  ledger_seq bigint NOT NULL
1162 );
1163 
1164 -- Function to verify ancestry of ledgers based on ledger_hash and prev_hash.
1165 -- Upon failure, returns ledger sequence failing ancestry check.
1166 -- Otherwise, returns NULL.
1167 -- _in_full: If TRUE, verify entire table. Else verify starting from
1168 -- value in the ancestry_verified table. If no value, then start
1169 -- from lowest ledger.
1170 -- _in_persist: If TRUE, persist the latest ledger with correct ancestry.
1171 -- If an exception was raised because of failure, persist
1172 -- the latest ledger prior to that which failed.
1173 -- _in_min: If set and _in_full is not true, the starting ledger from which
1174 -- to verify.
1175 -- _in_max: If set and _in_full is not true, the latest ledger to verify.
1176 CREATE OR REPLACE FUNCTION check_ancestry (
1177  _in_full bool = FALSE,
1178  _in_persist bool = TRUE,
1179  _in_min bigint = NULL,
1180  _in_max bigint = NULL
1181 ) RETURNS bigint AS $$
1182 DECLARE
1183  _min bigint;
1184  _max bigint;
1185  _last_verified bigint;
1186  _parent ledgers;
1187  _current ledgers;
1188  _cursor refcursor;
1189 BEGIN
1190  IF _in_full IS TRUE AND
1191  (_in_min IS NOT NULL) OR (_in_max IS NOT NULL) THEN
1192  RAISE 'Cannot specify manual range and do full check.';
1193  END IF;
1194 
1195  IF _in_min IS NOT NULL THEN
1196  _min := _in_min;
1197  ELSIF _in_full IS NOT TRUE THEN
1198  _last_verified := (SELECT ledger_seq FROM ancestry_verified);
1199  IF _last_verified IS NULL THEN
1200  _min := min_ledger();
1201  ELSE
1202  _min := _last_verified + 1;
1203  END IF;
1204  ELSE
1205  _min := min_ledger();
1206  END IF;
1207  EXECUTE 'SELECT * FROM ledgers WHERE ledger_seq = $1'
1208  INTO _parent USING _min - 1;
1209  IF _last_verified IS NOT NULL AND _parent IS NULL THEN
1210  RAISE 'Verified ledger % doesn''t exist.', _last_verified;
1211  END IF;
1212 
1213  IF _in_max IS NOT NULL THEN
1214  _max := _in_max;
1215  ELSE
1216  _max := max_ledger();
1217  END IF;
1218 
1219  OPEN _cursor FOR EXECUTE 'SELECT *
1220  FROM ledgers
1221  WHERE ledger_seq BETWEEN $1 AND $2
1222  ORDER BY ledger_seq ASC'
1223  USING _min, _max;
1224  LOOP
1225  FETCH _cursor INTO _current;
1226  IF _current IS NULL THEN EXIT; END IF;
1227  IF _parent IS NOT NULL THEN
1228  IF _current.prev_hash != _parent.ledger_hash THEN
1229  CLOSE _cursor;
1230  RETURN _current.ledger_seq;
1231  RAISE 'Ledger ancestry failure current, parent:% %',
1232  _current, _parent;
1233  END IF;
1234  END IF;
1235  _parent := _current;
1236  END LOOP;
1237  CLOSE _cursor;
1238 
1239  IF _in_persist IS TRUE AND _parent IS NOT NULL THEN
1240  DELETE FROM ancestry_verified;
1241  INSERT INTO ancestry_verified VALUES (_parent.ledger_seq);
1242  END IF;
1243 
1244  RETURN NULL;
1245 END;
1246 $$ LANGUAGE plpgsql;
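-- Example (illustrative):
--   SELECT check_ancestry();                  -- incremental, persists progress
--   SELECT check_ancestry(_in_full := TRUE);  -- re-verify the whole table
-- A NULL result means every checked ledger chains correctly; otherwise the
-- returned value is the first ledger_seq that failed the ancestry check.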
1247 
1248 -- Return number of whole seconds since the latest ledger was inserted, based
1249 -- on ledger close time (not wall clock) of the insert.
1250 -- Note that ledgers.closing_time is number of seconds since the XRP
1251 -- epoch, which is 01/01/2000 00:00:00. This in turn is 946684800 seconds
1252 -- after the UNIX epoch. This conforms to the "age" field in the
1253 -- server_info RPC call.
1254 CREATE OR REPLACE FUNCTION age () RETURNS bigint AS $$
1255 BEGIN
1256  RETURN (EXTRACT(EPOCH FROM (now())) -
1257  (946684800 + (SELECT closing_time
1258  FROM ledgers
1259  ORDER BY ledger_seq DESC
1260  LIMIT 1)))::bigint;
1261 END;
1262 $$ LANGUAGE plpgsql;
1263 
1264 -- Return range of ledgers, or empty if none. This conforms to the
1265 -- "complete_ledgers" field of the server_info RPC call. Note
1266 -- that ledger gaps are prevented for reporting mode so the range
1267 -- is simply the set between the least and greatest ledgers.
1268 CREATE OR REPLACE FUNCTION complete_ledgers () RETURNS text AS $$
1269 DECLARE
1270  _min bigint := min_ledger();
1271  _max bigint := max_ledger();
1272 BEGIN
1273  IF _min IS NULL THEN RETURN 'empty'; END IF;
1274  IF _min = _max THEN RETURN _min; END IF;
1275  RETURN _min || '-' || _max;
1276 END;
1277 $$ LANGUAGE plpgsql;
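-- Example (illustrative):
--   SELECT complete_ledgers();  -- e.g. 'empty', '32570', or '32570-75000000'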
1278 
1279 )"
1280 
1281  // version 2:
1282  // , R"(Full idempotent text of schema version 2)"
1283 
1284  // version 3:
1285  // , R"(Full idempotent text of schema version 3)"
1286 
1287  // version 4:
1288  // , R"(Full idempotent text of schema version 4)"
1289 
1290  // ...
1291 
1292  // version n:
1293  // , R"(Full idempotent text of schema version n)"
1294 };
1295 
1296 std::array<char const*, LATEST_SCHEMA_VERSION> upgrade_schemata = {
1297  // upgrade from version 0:
1298  "There is no upgrade path from version 0. Instead, install "
1299  "from full_schemata."
1300  // upgrade from version 1 to 2:
1301  //, R"(Text to idempotently upgrade from version 1 to 2)"
1302  // upgrade from version 2 to 3:
1303  //, R"(Text to idempotently upgrade from version 2 to 3)"
1304  // upgrade from version 3 to 4:
1305  //, R"(Text to idempotently upgrade from version 3 to 4)"
1306  // ...
1307  // upgrade from version n-1 to n:
1308  //, R"(Text to idempotently upgrade from version n-1 to n)"
1309 };
1310 
1324 void
1325 applySchema(
1326  std::shared_ptr<PgPool> const& pool,
1327  char const* schema,
1328  std::uint32_t currentVersion,
1329  std::uint32_t schemaVersion)
1330 {
1331  if (currentVersion != 0 && schemaVersion != currentVersion + 1)
1332  {
1333  assert(false);
1334  std::stringstream ss;
1335  ss << "Schema upgrade versions past initial deployment must increase "
1336  "monotonically. Versions: current, target: "
1337  << currentVersion << ", " << schemaVersion;
1338  Throw<std::runtime_error>(ss.str());
1339  }
1340 
1341  auto res = PgQuery(pool)({schema, {}});
1342  if (!res)
1343  {
1344  std::stringstream ss;
1345  ss << "Error applying schema from version " << currentVersion << " to "
1346  << schemaVersion << ": " << res.msg();
1347  Throw<std::runtime_error>(ss.str());
1348  }
1349 
1350  auto cmd = boost::format(R"(SELECT set_schema_version(%u, 0))");
1351  res = PgQuery(pool)({boost::str(cmd % schemaVersion).c_str(), {}});
1352  if (!res)
1353  {
1354  std::stringstream ss;
1355  ss << "Error setting schema version from " << currentVersion << " to "
1356  << schemaVersion << ": " << res.msg();
1357  Throw<std::runtime_error>(ss.str());
1358  }
1359 }
1360 
1361 void
1362 initSchema(std::shared_ptr<PgPool> const& pool)
1363 {
1364  // Figure out what schema version, if any, is already installed.
1365  auto res = PgQuery(pool)({version_query, {}});
1366  if (!res)
1367  {
1368  std::stringstream ss;
1369  ss << "Error getting database schema version: " << res.msg();
1370  Throw<std::runtime_error>(ss.str());
1371  }
1372  std::uint32_t currentSchemaVersion = res.asInt();
1373  std::uint32_t const pendingSchemaVersion = res.asInt(0, 1);
1374 
1375  // Nothing to do if we are on the latest schema.
1376  if (currentSchemaVersion == LATEST_SCHEMA_VERSION)
1377  return;
1378 
1379  if (currentSchemaVersion == 0)
1380  {
1381  // If a fresh install has not been completed, then re-attempt
1382  // the install of the same schema version.
1383  std::uint32_t const freshVersion =
1384  pendingSchemaVersion ? pendingSchemaVersion : LATEST_SCHEMA_VERSION;
1385  // Persist that we are attempting a fresh install to the latest version.
1386  // This protects against corruption in an aborted install that is
1387  // followed by a fresh installation attempt with a new schema.
1388  auto cmd = boost::format(R"(SELECT set_schema_version(0, %u))");
1389  res = PgQuery(pool)({boost::str(cmd % freshVersion).c_str(), {}});
1390  if (!res)
1391  {
1392  std::stringstream ss;
1393  ss << "Error setting schema version from " << currentSchemaVersion
1394  << " to " << freshVersion << ": " << res.msg();
1395  Throw<std::runtime_error>(ss.str());
1396  }
1397 
1398  // Install the full latest schema.
1399  applySchema(
1400  pool,
1401  full_schemata[freshVersion],
1402  currentSchemaVersion,
1403  freshVersion);
1404  currentSchemaVersion = freshVersion;
1405  }
1406 
1407  // Incrementally upgrade one version at a time until latest.
1408  for (; currentSchemaVersion < LATEST_SCHEMA_VERSION; ++currentSchemaVersion)
1409  {
1410  applySchema(
1411  pool,
1412  upgrade_schemata[currentSchemaVersion],
1413  currentSchemaVersion,
1414  currentSchemaVersion + 1);
1415  }
1416 }
1417 
1418 } // namespace ripple
1419 #endif