20 #ifdef RIPPLED_REPORTING
26 #include <arpa/inet.h>
27 #include <netinet/in.h>
28 #include <sys/socket.h>
29 #include <sys/types.h>
32 #include <ripple/basics/contract.h>
33 #include <ripple/core/Pg.h>
34 #include <boost/asio/ssl/detail/openssl_init.hpp>
35 #include <boost/format.hpp>
// libpq notice receiver callback: forwards asynchronous server
// notices/warnings to the journal at info level.
// NOTE(review): `j` is presumably a beast::Journal recovered from `arg`
// on an elided line above — confirm against the full source.
55 noticeReceiver(
void* arg, PGresult
const* res)
58 JLOG(j.
info()) <<
"server message: " << PQresultErrorMessage(res);
// Fragment: if a stored error is present, render it as
// "<status>: <message>" (judging by the first/second pair members).
// Surrounding declaration is elided — likely PgResult::msg().
66 if (error_.has_value())
69 ss << error_->first <<
": " << error_->second;
// Fragment of Pg::connect(): reuse the existing connection when its
// status is still CONNECTION_OK; otherwise (re)connect with
// PQconnectdbParams using the null-terminated keyword/value pointer
// arrays prepared by PgPool's constructor. Throws if libpq returns no
// connection struct at all.
91 if (PQstatus(conn_.get()) == CONNECTION_OK)
98 conn_.reset(PQconnectdbParams(
99 reinterpret_cast<char const* const*
>(&config_.keywordsIdx[0]),
100 reinterpret_cast<char const* const*
>(&config_.valuesIdx[0]),
103 Throw<std::runtime_error>(
"No db connection struct");
// A CONNECTION_BAD status after (re)connecting is fatal for this
// attempt: raise with the libpq error message attached.
108 if (PQstatus(conn_.get()) == CONNECTION_BAD)
111 ss <<
"DB connection status " << PQstatus(conn_.get()) <<
": "
112 << PQerrorMessage(conn_.get());
113 Throw<std::runtime_error>(ss.
str());
// Fragment of Pg::query(command, nParams, values): execute a command —
// PQexecParams when parameters are supplied, plain PQexec otherwise —
// holding the PGresult in a unique_ptr whose deleter calls PQclear.
// Errors during submission are logged and retried (see the "database
// error, retrying" journal line); a null result structure throws.
// NOTE(review): the retry-loop control flow is on elided lines —
// confirm against the full source.
122 Pg::query(
char const* command,
std::size_t nParams,
char const*
const* values)
125 pg_result_type ret{
nullptr, [](PGresult* result) { PQclear(result); }};
140 ret.reset(PQexecParams(
155 ret.reset(PQexec(conn_.get(), command));
158 Throw<std::runtime_error>(
"no result structure returned");
165 JLOG(j_.
error()) <<
"database error, retrying: " << e.
what();
// Inspect the final result status: TUPLES_OK/COMMAND_OK/COPY_BOTH are
// acceptable; anything else is reported with full diagnostic context.
171 switch (PQresultStatus(ret.get()))
173 case PGRES_TUPLES_OK:
174 case PGRES_COMMAND_OK:
177 case PGRES_COPY_BOTH:
181 ss <<
"bad query result: " << PQresStatus(PQresultStatus(ret.get()))
182 <<
" error message: " << PQerrorMessage(conn_.get())
183 <<
", number of tuples: " << PQntuples(ret.get())
184 <<
", number of fields: " << PQnfields(ret.get());
186 PgResult retRes(ret.get(), conn_.get());
192 return PgResult(std::move(ret));
// Fragment of formatParams(): build the formatted parameter list for a
// pg_params query and trace-log the command text together with its
// parameter values. Most of the body is elided here.
195 static pg_formatted_params
206 for (
auto const& value : values)
211 ss << value->c_str();
224 JLOG(j.
trace()) <<
"query: " << dbParams.first <<
". params: " << ss.
str();
// Pg::query(pg_params) overload fragment: formats the bound parameters
// and forwards to the array-based query entry point. Note that the
// address of formattedParams[0] is only taken when size() is non-zero
// (guarded by the ternary), avoiding UB on an empty vector.
229 Pg::query(pg_params
const& dbParams)
231 char const*
const& command = dbParams.first;
232 auto const formattedParams = formatParams(dbParams, j_);
235 formattedParams.size(),
236 formattedParams.size()
237 ?
reinterpret_cast<char const* const*
>(&formattedParams[0])
// Fragment of Pg::bulkInsert(): stream `records` into `table` using the
// Postgres COPY protocol. Steps visible here: (1) issue "COPY <table>
// FROM stdin" and require PGRES_COPY_IN, (2) push the data with
// PQputCopyData, (3) finish with PQputCopyEnd, (4) fetch the terminal
// result and require PGRES_COMMAND_OK. Each failure throws a
// std::runtime_error carrying the table name and libpq diagnostics.
242 Pg::bulkInsert(
char const* table,
std::string const& records)
246 static auto copyCmd = boost::format(R
"(COPY %s FROM stdin)");
247 auto res = query(boost::str(copyCmd % table).c_str());
248 if (!res || res.status() != PGRES_COPY_IN)
251 ss <<
"bulkInsert to " << table
252 <<
". Postgres insert error: " << res.msg();
254 ss <<
". Query status not PGRES_COPY_IN: " << res.status();
255 Throw<std::runtime_error>(ss.
str());
// Send the entire record buffer; -1 indicates a transmission error.
258 if (PQputCopyData(conn_.get(), records.
c_str(), records.
size()) == -1)
261 ss <<
"bulkInsert to " << table
262 <<
". PQputCopyData error: " << PQerrorMessage(conn_.get());
264 Throw<std::runtime_error>(ss.
str());
// Terminate the COPY stream (nullptr = no error message to server).
267 if (PQputCopyEnd(conn_.get(),
nullptr) == -1)
270 ss <<
"bulkInsert to " << table
271 <<
". PQputCopyEnd error: " << PQerrorMessage(conn_.get());
273 Throw<std::runtime_error>(ss.
str());
// The server's verdict on the COPY arrives as a final PGresult.
277 pg_result_type copyEndResult{
278 nullptr, [](PGresult* result) { PQclear(result); }};
279 copyEndResult.reset(PQgetResult(conn_.get()));
280 ExecStatusType
status = PQresultStatus(copyEndResult.get());
281 if (status != PGRES_COMMAND_OK)
284 ss <<
"bulkInsert to " << table
285 <<
". PQputCopyEnd status not PGRES_COMMAND_OK: " <<
status;
287 Throw<std::runtime_error>(ss.
str());
// Fragment of Pg::clear(): drain any pending PGresults from the
// connection (terminating an in-progress COPY via PQputCopyEnd when
// one is detected) so the connection can safely return to the pool.
// Loops while results remain and the connection handle is live, then
// reports whether the connection is still usable.
298 pg_result_type res{
nullptr, [](PGresult* result) { PQclear(result); }};
303 res.reset(PQgetResult(conn_.get()));
309 switch (PQresultStatus(res.get()))
312 if (PQputCopyEnd(conn_.get(),
nullptr) != -1)
316 case PGRES_COPY_BOTH:
320 }
while (res && conn_);
322 return conn_ !=
nullptr;
// Fragment of the PgPool constructor. Visible responsibilities:
// initialize OpenSSL once for libpq's benefit, open a probe connection
// from the configured conninfo, capture the resolved peer address
// (so later connections can pin "hostaddr"/"port" when remember_ip is
// set), copy the effective connection options out of PQconninfo with
// size/count sanity limits, and build the null-terminated keyword/value
// pointer arrays later consumed by PQconnectdbParams. Finally reads
// max_connections and logs the effective (password-masked) settings.
327 PgPool::PgPool(Section
const& pgConfig, Stoppable& parent,
beast::Journal j)
328 : Stoppable(
"PgPool", parent), j_(j)
// function-local static: one-time OpenSSL init, thread-safe in C++11+.
332 static boost::asio::ssl::detail::openssl_init<true> initSsl;
350 pg_connection_type conn(
351 PQconnectdb(get<std::string>(pgConfig,
"conninfo").c_str()),
352 [](PGconn* conn) { PQfinish(conn); });
354 Throw<std::runtime_error>(
"Can't create DB connection.");
355 if (PQstatus(conn.get()) != CONNECTION_OK)
358 ss <<
"Initial DB connection failed: " << PQerrorMessage(conn.get());
359 Throw<std::runtime_error>(ss.
str());
// Resolve the socket's peer address so the literal IP can be reused.
362 int const sockfd = PQsocket(conn.get());
364 Throw<std::runtime_error>(
"No DB socket is open.");
365 struct sockaddr_storage addr;
366 socklen_t len =
sizeof(addr);
367 if (getpeername(sockfd,
reinterpret_cast<struct sockaddr*
>(&addr), &len) ==
370 Throw<std::system_error>(
375 bool const remember_ip =
get(pgConfig,
"remember_ip",
true);
379 config_.keywords.push_back(
"port");
380 config_.keywords.push_back(
"hostaddr");
// Convert the peer address to its string form, IPv4 or IPv6.
384 if (addr.ss_family == AF_INET)
386 hostaddr.
assign(INET_ADDRSTRLEN,
'\0');
387 struct sockaddr_in const& ainfo =
388 reinterpret_cast<struct sockaddr_in&
>(addr);
391 AF_INET, &ainfo.sin_addr, &hostaddr[0], hostaddr.
size()))
393 Throw<std::system_error>(
396 "Can't get IPv4 address string.");
399 else if (addr.ss_family == AF_INET6)
401 hostaddr.
assign(INET6_ADDRSTRLEN,
'\0');
402 struct sockaddr_in6 const& ainfo =
403 reinterpret_cast<struct sockaddr_in6&
>(addr);
406 AF_INET6, &ainfo.sin6_addr, &hostaddr[0], hostaddr.
size()))
408 Throw<std::system_error>(
411 "Can't get IPv6 address string.");
415 config_.values.push_back(port.
c_str());
416 config_.values.push_back(hostaddr.
c_str());
// Copy libpq's effective connection options, rejecting oversized or
// excessive option sets as a defensive measure against a bad server.
418 std::unique_ptr<PQconninfoOption, void (*)(PQconninfoOption*)> connOptions(
419 PQconninfo(conn.get()),
420 [](PQconninfoOption* opts) { PQconninfoFree(opts); });
422 Throw<std::runtime_error>(
"Can't get DB connection options.");
425 for (PQconninfoOption* option = connOptions.get();
426 option->keyword !=
nullptr;
429 if (++nfields > maxFields)
432 ss <<
"DB returned connection options with > " << maxFields
434 Throw<std::runtime_error>(ss.
str());
439 (!
strcmp(option->keyword,
"hostaddr") ||
440 !
strcmp(option->keyword,
"port"))))
445 if (
strlen(option->keyword) > maxFieldSize ||
446 strlen(option->val) > maxFieldSize)
449 ss <<
"DB returned a connection option name or value with\n";
450 ss <<
"excessive size (>" << maxFieldSize <<
" bytes).\n";
451 ss <<
"option (possibly truncated): "
456 ss <<
" value (possibly truncated): "
459 Throw<std::runtime_error>(ss.
str());
461 config_.keywords.push_back(option->keyword);
462 config_.values.push_back(option->val);
// Build null-terminated pointer arrays for PQconnectdbParams. The
// pointers alias config_.keywords/values strings, which must outlive
// them; reserve first so c_str() pointers are not invalidated.
465 config_.keywordsIdx.reserve(config_.keywords.size() + 1);
466 config_.valuesIdx.reserve(config_.values.size() + 1);
467 for (
std::size_t n = 0; n < config_.keywords.size(); ++n)
469 config_.keywordsIdx.push_back(config_.keywords[n].c_str());
470 config_.valuesIdx.push_back(config_.values[n].c_str());
472 config_.keywordsIdx.push_back(
nullptr);
473 config_.valuesIdx.push_back(
nullptr);
475 get_if_exists(pgConfig,
"max_connections", config_.max_connections);
486 ss <<
"max_connections: " << config_.max_connections <<
", "
487 <<
"timeout: " << config_.timeout.count() <<
", "
488 <<
"connection params: ";
490 for (
std::size_t i = 0; i < config_.keywords.size(); ++i)
496 ss << config_.keywords[i] <<
": "
497 << (config_.keywords[i] ==
"password" ?
"*" : config_.values[i]);
// Fragment of the pool's stop handler: log that shutdown completed.
511 JLOG(j_.
info()) <<
"stopped";
// Fragment of PgPool::idleSweeper(): erase idle connections whose idle
// timestamp predates now - config_.timeout (idle_ is keyed by idle
// time, so upper_bound finds the cutoff), then log connection totals,
// checked-out count, and before/after idle sizes.
515 PgPool::idleSweeper()
520 before = idle_.size();
524 idle_.upper_bound(clock_type::now() - config_.timeout);
525 for (
auto it = idle_.begin(); it != found;)
527 it = idle_.erase(it);
531 after = idle_.size();
534 JLOG(j_.
info()) <<
"Idle sweeper. connections: " << connections_
535 <<
". checked out: " << connections_ -
after
536 <<
". idle before, after sweep: " << before <<
", "
// Fragments of PgPool::checkout()/checkin(). Checkout: prefer the most
// recently idled connection (rbegin of the time-keyed map), create a
// fresh Pg while under max_connections, otherwise log and loop until a
// connection frees up or the pool is stopping. Checkin: return the
// connection to the idle set only if the pool is running and the
// connection drained cleanly (pg->clear()).
553 auto entry = idle_.rbegin();
554 ret = std::move(entry->second);
558 else if (connections_ < config_.max_connections)
561 ret = std::make_unique<Pg>(config_, j_, stop_, mutex_);
566 JLOG(j_.
error()) <<
"No database connections available.";
569 }
while (!ret && !stop_);
581 if (!stop_ && pg->clear())
583 idle_.emplace(clock_type::now(), std::move(pg));
// Factory fragment: construct a shared PgPool from the [ledger_tx_tables]
// style config section; setup/return logic continues on elided lines.
598 make_PgPool(Section
const& pgConfig, Stoppable& parent,
beast::Journal j)
600 auto ret = std::make_shared<PgPool>(pgConfig, parent, j);
// Latest schema version this build knows how to deploy/upgrade to.
687 #define LATEST_SCHEMA_VERSION 1
// The region below is the schema bootstrap: version bookkeeping
// (version table, set_schema_version), the full DDL for ledgers/
// transactions/account_transactions with ancestry-protecting triggers
// and update-protect rules, and the PL/pgSQL helpers backing the tx(),
// account_tx(), server_info age/complete_ledgers, and online-delete
// RPC paths. It is embedded in C++ raw string literals whose bytes are
// sent verbatim to Postgres — do NOT edit inside the string content.
// Large parts of the original text are elided in this view.
689 char const* version_query = R
"(
690 CREATE TABLE IF NOT EXISTS version (version int NOT NULL,
691 fresh_pending int NOT NULL);
693 -- Version 0 means that no schema has been fully deployed.
696 IF NOT EXISTS (SELECT 1 FROM version) THEN
697 INSERT INTO version VALUES (0, 0);
701 -- Function to set the schema version. _in_pending should only be set to
702 -- non-zero prior to an attempt to initialize the schema from scratch.
703 -- After successful initialization, this should set to 0.
704 -- _in_version should be set to the version of schema that has been applied
705 -- once successful application has occurred.
706 CREATE OR REPLACE FUNCTION set_schema_version (
711 _current_version int;
713 IF _in_version IS NULL OR _in_pending IS NULL THEN RETURN; END IF;
714 IF EXISTS (SELECT 1 FROM version) THEN DELETE FROM version; END IF;
715 INSERT INTO version VALUES (_in_version, _in_pending);
720 -- PQexec() returns the output of the last statement in its response.
721 SELECT * FROM version;
726 "There is no such thing as schema version 0."
731 -- Table to store ledger headers.
732 CREATE TABLE IF NOT EXISTS ledgers (
733 ledger_seq bigint PRIMARY KEY,
734 ledger_hash bytea NOT NULL,
735 prev_hash bytea NOT NULL,
736 total_coins bigint NOT NULL,
737 closing_time bigint NOT NULL,
738 prev_closing_time bigint NOT NULL,
739 close_time_res bigint NOT NULL,
740 close_flags bigint NOT NULL,
741 account_set_hash bytea NOT NULL,
742 trans_set_hash bytea NOT NULL
745 -- Index for lookups by ledger hash.
746 CREATE INDEX IF NOT EXISTS ledgers_ledger_hash_idx ON ledgers
747 USING hash (ledger_hash);
749 -- Transactions table. Deletes from the ledger table
750 -- cascade here based on ledger_seq.
751 CREATE TABLE IF NOT EXISTS transactions (
752 ledger_seq bigint NOT NULL,
753 transaction_index bigint NOT NULL,
754 trans_id bytea NOT NULL,
755 nodestore_hash bytea NOT NULL,
756 constraint transactions_pkey PRIMARY KEY (ledger_seq, transaction_index),
757 constraint transactions_fkey FOREIGN KEY (ledger_seq)
758 REFERENCES ledgers (ledger_seq) ON DELETE CASCADE
761 -- Index for lookups by transaction hash.
762 CREATE INDEX IF NOT EXISTS transactions_trans_id_idx ON transactions
763 USING hash (trans_id);
765 -- Table that maps accounts to transactions affecting them. Deletes from the
766 -- ledger table by way of transactions table cascade here based on ledger_seq.
767 CREATE TABLE IF NOT EXISTS account_transactions (
768 account bytea NOT NULL,
769 ledger_seq bigint NOT NULL,
770 transaction_index bigint NOT NULL,
771 constraint account_transactions_pkey PRIMARY KEY (account, ledger_seq,
773 constraint account_transactions_fkey FOREIGN KEY (ledger_seq,
774 transaction_index) REFERENCES transactions (
775 ledger_seq, transaction_index) ON DELETE CASCADE
778 -- Index to allow for fast cascading deletions and referential integrity.
779 CREATE INDEX IF NOT EXISTS fki_account_transactions_idx ON
780 account_transactions USING btree (ledger_seq, transaction_index);
782 -- Avoid inadvertent administrative tampering with committed data.
783 CREATE OR REPLACE RULE ledgers_update_protect AS ON UPDATE TO
784 ledgers DO INSTEAD NOTHING;
785 CREATE OR REPLACE RULE transactions_update_protect AS ON UPDATE TO
786 transactions DO INSTEAD NOTHING;
787 CREATE OR REPLACE RULE account_transactions_update_protect AS ON UPDATE TO
788 account_transactions DO INSTEAD NOTHING;
790 -- Stored procedure to assist with the tx() RPC call. Takes transaction hash
791 -- as input. If found, returns the ledger sequence in which it was applied.
792 -- If not, returns the range of ledgers searched.
793 CREATE OR REPLACE FUNCTION tx (
795 ) RETURNS jsonb AS $$
797 _min_ledger bigint := min_ledger();
798 _min_seq bigint := (SELECT ledger_seq
800 WHERE ledger_seq = _min_ledger
802 _max_seq bigint := max_ledger();
804 _nodestore_hash bytea;
807 IF _min_seq IS NULL THEN
808 RETURN jsonb_build_object('error', 'empty database');
810 IF length(_in_trans_id) != 32 THEN
811 RETURN jsonb_build_object('error', '_in_trans_id size: '
812 || to_char(length(_in_trans_id), '999'));
815 EXECUTE 'SELECT nodestore_hash, ledger_seq
818 AND ledger_seq BETWEEN $2 AND $3
819 ' INTO _nodestore_hash, _ledger_seq USING _in_trans_id, _min_seq, _max_seq;
820 IF _nodestore_hash IS NULL THEN
821 RETURN jsonb_build_object('min_seq', _min_seq, 'max_seq', _max_seq);
823 RETURN jsonb_build_object('nodestore_hash', _nodestore_hash, 'ledger_seq',
828 -- Return the earliest ledger sequence intended for range operations
829 -- that protect the bottom of the range from deletion. Return NULL if empty.
830 CREATE OR REPLACE FUNCTION min_ledger () RETURNS bigint AS $$
832 _min_seq bigint := (SELECT ledger_seq from min_seq);
834 IF _min_seq IS NULL THEN
835 RETURN (SELECT ledger_seq FROM ledgers ORDER BY ledger_seq ASC LIMIT 1);
842 -- Return the latest ledger sequence in the database, or NULL if empty.
843 CREATE OR REPLACE FUNCTION max_ledger () RETURNS bigint AS $$
845 RETURN (SELECT ledger_seq FROM ledgers ORDER BY ledger_seq DESC LIMIT 1);
849 -- account_tx() RPC helper. From the rippled reporting process, only the
850 -- parameters without defaults are required. For the parameters with
851 -- defaults, validation should be done by rippled, such as:
852 -- _in_account_id should be a valid xrp base58 address.
853 -- _in_forward either true or false according to the published api
854 -- _in_limit should be validated and not simply passed through from
857 -- For _in_ledger_index_min and _in_ledger_index_max, if passed in the
858 -- request, verify that their type is int and pass through as is.
859 -- For _ledger_hash, verify and convert from hex length 32 bytes and
860 -- prepend with \x (\\x C++).
862 -- For _in_ledger_index, if the input type is integer, then pass through
863 -- as is. If the type is string and contents = validated, then do not
864 -- set _in_ledger_index. Instead set _in_invalidated to TRUE.
866 -- There is no need for rippled to do any type of lookup on max/min
867 -- ledger range, lookup of hash, or the like. This functions does those
868 -- things, including error responses if bad input. Only the above must
869 -- be done to set the correct search range.
871 -- If a marker is present in the request, verify the members 'ledger'
872 -- and 'seq' are integers and they correspond to _in_marker_seq
875 -- JSON input field 'ledger' corresponds to _in_marker_seq
876 -- JSON input field 'seq' corresponds to _in_marker_index
877 CREATE OR REPLACE FUNCTION account_tx (
878 _in_account_id bytea,
881 _in_ledger_index_min bigint = NULL,
882 _in_ledger_index_max bigint = NULL,
883 _in_ledger_hash bytea = NULL,
884 _in_ledger_index bigint = NULL,
885 _in_validated bool = NULL,
886 _in_marker_seq bigint = NULL,
887 _in_marker_index bigint = NULL
888 ) RETURNS jsonb AS $$
892 _sort_order text := (SELECT CASE WHEN _in_forward IS TRUE THEN
893 'ASC' ELSE 'DESC' END);
903 _transactions jsonb[] := '{}';
905 IF _in_ledger_index_min IS NOT NULL OR
906 _in_ledger_index_max IS NOT NULL THEN
907 _min := (SELECT CASE WHEN _in_ledger_index_min IS NULL
908 THEN min_ledger() ELSE greatest(
909 _in_ledger_index_min, min_ledger()) END);
910 _max := (SELECT CASE WHEN _in_ledger_index_max IS NULL OR
911 _in_ledger_index_max = -1 THEN max_ledger() ELSE
912 least(_in_ledger_index_max, max_ledger()) END);
915 RETURN jsonb_build_object('error', 'max is less than min ledger');
918 ELSIF _in_ledger_hash IS NOT NULL OR _in_ledger_index IS NOT NULL
919 OR _in_validated IS TRUE THEN
920 IF _in_ledger_hash IS NOT NULL THEN
921 IF length(_in_ledger_hash) != 32 THEN
922 RETURN jsonb_build_object('error', '_in_ledger_hash size: '
923 || to_char(length(_in_ledger_hash), '999'));
925 EXECUTE 'SELECT ledger_seq
927 WHERE ledger_hash = $1'
928 INTO _min USING _in_ledger_hash::bytea;
930 IF _in_ledger_index IS NOT NULL AND _in_validated IS TRUE THEN
931 RETURN jsonb_build_object('error',
932 '_in_ledger_index cannot be set and _in_validated true');
934 IF _in_validated IS TRUE THEN
935 _in_ledger_index := max_ledger();
937 _min := (SELECT ledger_seq
939 WHERE ledger_seq = _in_ledger_index);
942 RETURN jsonb_build_object('error', 'ledger not found');
946 _min := min_ledger();
947 _max := max_ledger();
950 IF _in_marker_seq IS NOT NULL OR _in_marker_index IS NOT NULL THEN
952 IF _in_marker_seq IS NULL OR _in_marker_index IS NULL THEN
953 -- The rippled implementation returns no transaction results
954 -- if either of these values are missing.
958 IF _in_forward IS TRUE THEN
959 _between_min := _in_marker_seq;
960 _between_max := _max;
962 _between_min := _min;
963 _between_max := _in_marker_seq;
968 _between_min := _min;
969 _between_max := _max;
971 IF _between_max < _between_min THEN
972 RETURN jsonb_build_object('error', 'ledger search range is '
973 || to_char(_between_min, '999') || '-'
974 || to_char(_between_max, '999'));
978 SELECT transactions.ledger_seq, transactions.transaction_index,
979 transactions.trans_id, transactions.nodestore_hash
981 INNER JOIN account_transactions
982 ON transactions.ledger_seq =
983 account_transactions.ledger_seq
984 AND transactions.transaction_index =
985 account_transactions.transaction_index
986 WHERE account_transactions.account = $1
987 AND account_transactions.ledger_seq BETWEEN $2 AND $3
988 ORDER BY transactions.ledger_seq %s, transactions.transaction_index %s
989 ', _sort_order, _sort_order);
991 OPEN _cursor FOR EXECUTE _sql USING _in_account_id, _between_min,
994 FETCH _cursor INTO _record;
995 IF _record IS NULL THEN EXIT; END IF;
996 IF _marker IS TRUE THEN
997 IF _in_marker_seq = _record.ledger_seq THEN
998 IF _in_forward IS TRUE THEN
999 IF _in_marker_index > _record.transaction_index THEN
1003 IF _in_marker_index < _record.transaction_index THEN
1011 _tally := _tally + 1;
1012 IF _tally > _in_limit THEN
1013 _ret_marker := jsonb_build_object(
1014 'ledger', _record.ledger_seq,
1015 'seq', _record.transaction_index);
1019 -- Is the transaction index in the tx object?
1020 _transactions := _transactions || jsonb_build_object(
1021 'ledger_seq', _record.ledger_seq,
1022 'transaction_index', _record.transaction_index,
1023 'trans_id', _record.trans_id,
1024 'nodestore_hash', _record.nodestore_hash);
1029 _result := jsonb_build_object('ledger_index_min', _min,
1030 'ledger_index_max', _max,
1031 'transactions', _transactions);
1032 IF _ret_marker IS NOT NULL THEN
1033 _result := _result || jsonb_build_object('marker', _ret_marker);
1037 $$ LANGUAGE plpgsql;
1039 -- Trigger prior to insert on ledgers table. Validates length of hash fields.
1040 -- Verifies ancestry based on ledger_hash & prev_hash as follows:
1041 -- 1) If ledgers is empty, allows insert.
1042 -- 2) For each new row, check for previous and later ledgers by a single
1043 -- sequence. For each that exist, confirm ancestry based on hashes.
1044 -- 3) Disallow inserts with no prior or next ledger by sequence if any
1045 -- ledgers currently exist. This disallows gaps to be introduced by
1046 -- way of inserting.
1047 CREATE OR REPLACE FUNCTION insert_ancestry() RETURNS TRIGGER AS $$
1052 IF length(NEW.ledger_hash) != 32 OR length(NEW.prev_hash) != 32 THEN
1053 RAISE 'ledger_hash and prev_hash must each be 32 bytes: %', NEW;
1056 IF (SELECT ledger_hash
1058 ORDER BY ledger_seq DESC
1059 LIMIT 1) = NEW.prev_hash THEN RETURN NEW; END IF;
1061 IF NOT EXISTS (SELECT 1 FROM LEDGERS) THEN RETURN NEW; END IF;
1063 _parent := (SELECT ledger_hash
1065 WHERE ledger_seq = NEW.ledger_seq - 1);
1066 _child := (SELECT prev_hash
1068 WHERE ledger_seq = NEW.ledger_seq + 1);
1069 IF _parent IS NULL AND _child IS NULL THEN
1070 RAISE 'Ledger Ancestry error: orphan.';
1072 IF _parent != NEW.prev_hash THEN
1073 RAISE 'Ledger Ancestry error: bad parent.';
1075 IF _child != NEW.ledger_hash THEN
1076 RAISE 'Ledger Ancestry error: bad child.';
1081 $$ LANGUAGE plpgsql;
1083 -- Trigger function prior to delete on ledgers table. Disallow gaps from
1084 -- forming. Do not allow deletions if both the previous and next ledgers
1085 -- are present. In other words, only allow either the least or greatest
1087 CREATE OR REPLACE FUNCTION delete_ancestry () RETURNS TRIGGER AS $$
1091 WHERE ledger_seq = OLD.ledger_seq + 1)
1092 AND EXISTS (SELECT 1
1094 WHERE ledger_seq = OLD.ledger_seq - 1) THEN
1095 RAISE 'Ledger Ancestry error: Can only delete the least or greatest '
1100 $$ LANGUAGE plpgsql;
1102 -- Track the minimum sequence that should be used for ranged queries
1103 -- with protection against deletion during the query. This should
1104 -- be updated before calling online_delete() to not block deleting that
1106 CREATE TABLE IF NOT EXISTS min_seq (
1107 ledger_seq bigint NOT NULL
1110 -- Set the minimum sequence for use in ranged queries with protection
1111 -- against deletion greater than or equal to the input parameter. This
1112 -- should be called prior to online_delete() with the same parameter
1113 -- value so that online_delete() is not blocked by range queries
1114 -- that are protected against concurrent deletion of the ledger at
1115 -- the bottom of the range. This function needs to be called from a
1116 -- separate transaction from that which executes online_delete().
1117 CREATE OR REPLACE FUNCTION prepare_delete (
1118 _in_last_rotated bigint
1119 ) RETURNS void AS $$
1121 IF EXISTS (SELECT 1 FROM min_seq) THEN
1122 DELETE FROM min_seq;
1124 INSERT INTO min_seq VALUES (_in_last_rotated + 1);
1126 $$ LANGUAGE plpgsql;
1128 -- Function to delete old data. All data belonging to ledgers prior to and
1129 -- equal to the _in_seq parameter will be deleted. This should be
1130 -- called with the input parameter equivalent to the value of lastRotated
1131 -- in rippled's online_delete routine.
1132 CREATE OR REPLACE FUNCTION online_delete (
1134 ) RETURNS void AS $$
1136 DELETE FROM LEDGERS WHERE ledger_seq <= _in_seq;
1138 $$ LANGUAGE plpgsql;
1140 -- Function to delete data from the top of the ledger range. Delete
1141 -- everything greater than the input parameter.
1142 -- It doesn't do a normal range delete because of the trigger protecting
1143 -- deletions causing gaps. Instead, it walks back from the greatest ledger.
1144 CREATE OR REPLACE FUNCTION delete_above (
1146 ) RETURNS void AS $$
1148 _max_seq bigint := max_ledger();
1149 _i bigint := _max_seq;
1151 IF _max_seq IS NULL THEN RETURN; END IF;
1153 IF _i <= _in_seq THEN RETURN; END IF;
1154 EXECUTE 'DELETE FROM ledgers WHERE ledger_seq = $1' USING _i;
1158 $$ LANGUAGE plpgsql;
1160 -- Verify correct ancestry of ledgers in database:
1161 -- Table to persist last-confirmed latest ledger with proper ancestry.
1162 CREATE TABLE IF NOT EXISTS ancestry_verified (
1163 ledger_seq bigint NOT NULL
1166 -- Function to verify ancestry of ledgers based on ledger_hash and prev_hash.
1167 -- Upon failure, returns ledger sequence failing ancestry check.
1168 -- Otherwise, returns NULL.
1169 -- _in_full: If TRUE, verify entire table. Else verify starting from
1170 -- value in ancestry_verfied table. If no value, then start
1171 -- from lowest ledger.
1172 -- _in_persist: If TRUE, persist the latest ledger with correct ancestry.
1173 -- If an exception was raised because of failure, persist
1174 -- the latest ledger prior to that which failed.
1175 -- _in_min: If set and _in_full is not true, the starting ledger from which
1177 -- _in_max: If set and _in_full is not true, the latest ledger to verify.
1178 CREATE OR REPLACE FUNCTION check_ancestry (
1179 _in_full bool = FALSE,
1180 _in_persist bool = TRUE,
1181 _in_min bigint = NULL,
1182 _in_max bigint = NULL
1183 ) RETURNS bigint AS $$
1187 _last_verified bigint;
1192 IF _in_full IS TRUE AND
1193 (_in_min IS NOT NULL) OR (_in_max IS NOT NULL) THEN
1194 RAISE 'Cannot specify manual range and do full check.';
1197 IF _in_min IS NOT NULL THEN
1199 ELSIF _in_full IS NOT TRUE THEN
1200 _last_verified := (SELECT ledger_seq FROM ancestry_verified);
1201 IF _last_verified IS NULL THEN
1202 _min := min_ledger();
1204 _min := _last_verified + 1;
1207 _min := min_ledger();
1209 EXECUTE 'SELECT * FROM ledgers WHERE ledger_seq = $1'
1210 INTO _parent USING _min - 1;
1211 IF _last_verified IS NOT NULL AND _parent IS NULL THEN
1212 RAISE 'Verified ledger % doesn''t exist.', _last_verified;
1215 IF _in_max IS NOT NULL THEN
1218 _max := max_ledger();
1221 OPEN _cursor FOR EXECUTE 'SELECT *
1223 WHERE ledger_seq BETWEEN $1 AND $2
1224 ORDER BY ledger_seq ASC'
1227 FETCH _cursor INTO _current;
1228 IF _current IS NULL THEN EXIT; END IF;
1229 IF _parent IS NOT NULL THEN
1230 IF _current.prev_hash != _parent.ledger_hash THEN
1232 RETURN _current.ledger_seq;
1233 RAISE 'Ledger ancestry failure current, parent:% %',
1237 _parent := _current;
1241 IF _in_persist IS TRUE AND _parent IS NOT NULL THEN
1242 DELETE FROM ancestry_verified;
1243 INSERT INTO ancestry_verified VALUES (_parent.ledger_seq);
1248 $$ LANGUAGE plpgsql;
1250 -- Return number of whole seconds since the latest ledger was inserted, based
1251 -- on ledger close time (not wall clock) of the insert.
1252 -- Note that ledgers.closing_time is number of seconds since the XRP
1253 -- epoch, which is 01/01/2000 00:00:00. This in turn is 946684800 seconds
1254 -- after the UNIX epoch. This conforms to the "age" field in the
1255 -- server_info RPC call.
1256 CREATE OR REPLACE FUNCTION age () RETURNS bigint AS $$
1258 RETURN (EXTRACT(EPOCH FROM (now())) -
1259 (946684800 + (SELECT closing_time
1261 ORDER BY ledger_seq DESC
1264 $$ LANGUAGE plpgsql;
1266 -- Return range of ledgers, or empty if none. This conforms to the
1267 -- "complete_ledgers" field of the server_info RPC call. Note
1268 -- that ledger gaps are prevented for reporting mode so the range
1269 -- is simply the set between the least and greatest ledgers.
1270 CREATE OR REPLACE FUNCTION complete_ledgers () RETURNS text AS $$
1272 _min bigint := min_ledger();
1273 _max bigint := max_ledger();
1275 IF _min IS NULL THEN RETURN 'empty'; END IF;
1276 IF _min = _max THEN RETURN _min; END IF;
1277 RETURN _min || '-' || _max;
1279 $$ LANGUAGE plpgsql;
// Fragment of applySchema(): upgrade-path guard strings and the logic
// that (1) requires schema versions to increase by exactly one past
// initial deployment, (2) executes the schema SQL, and (3) records the
// new version via set_schema_version. Every failure throws a
// std::runtime_error with the current/target versions in the message.
"There is no upgrade path from version 0. Instead, install "
"from full_schemata."
1333 if (currentVersion != 0 && schemaVersion != currentVersion + 1)
1337 ss <<
"Schema upgrade versions past initial deployment must increase "
"monotonically. Versions: current, target: "
1339 << currentVersion <<
", " << schemaVersion;
1340 Throw<std::runtime_error>(ss.
str());
// Run the DDL/upgrade script itself.
1343 auto res = PgQuery(pool)({schema, {}});
1347 ss <<
"Error applying schema from version " << currentVersion <<
"to "
1348 << schemaVersion <<
": " << res.msg();
1349 Throw<std::runtime_error>(ss.
str());
// Persist the successfully-applied version (pending flag cleared to 0).
1352 auto cmd = boost::format(R
"(SELECT set_schema_version(%u, 0))");
1353 res = PgQuery(pool)({boost::str(cmd % schemaVersion).c_str(), {}});
1357 ss <<
"Error setting schema version from " << currentVersion <<
" to "
1358 << schemaVersion <<
": " << res.msg();
1359 Throw<std::runtime_error>(ss.
str());
// Fragment of initSchema(): query the current/pending schema version,
// and if the database is fresh (version 0) deploy the full schema —
// first marking the target version as pending via set_schema_version(0, n)
// so an interrupted install is detectable — then apply any remaining
// incremental upgrades up to LATEST_SCHEMA_VERSION.
1367 auto res = PgQuery(pool)({version_query, {}});
1371 ss <<
"Error getting database schema version: " << res.msg();
1372 Throw<std::runtime_error>(ss.
str());
1378 if (currentSchemaVersion == LATEST_SCHEMA_VERSION)
1381 if (currentSchemaVersion == 0)
1386 pendingSchemaVersion ? pendingSchemaVersion : LATEST_SCHEMA_VERSION;
1390 auto cmd = boost::format(R
"(SELECT set_schema_version(0, %u))");
1391 res = PgQuery(pool)({boost::str(cmd % freshVersion).c_str(), {}});
1395 ss <<
"Error setting schema version from " << currentSchemaVersion
1396 <<
" to " << freshVersion <<
": " << res.msg();
1397 Throw<std::runtime_error>(ss.
str());
1403 full_schemata[freshVersion],
1404 currentSchemaVersion,
1406 currentSchemaVersion = freshVersion;
// Walk forward one version at a time until the latest schema is applied.
1410 for (; currentSchemaVersion < LATEST_SCHEMA_VERSION; ++currentSchemaVersion)
1414 upgrade_schemata[currentSchemaVersion],
1415 currentSchemaVersion,
1416 currentSchemaVersion + 1);