Mirror of https://github.com/XRPLF/clio.git, synced 2025-11-30 16:45:51 +00:00

Commit: drop hash index on objects.ledger_sequence; it was making initial ingestion slow
@@ -759,8 +759,6 @@ create table if not exists objects6 partition of objects for values from (50000000) to (60000000);
 create table if not exists objects7 partition of objects for values from (60000000) to (70000000);
 
-create index if not exists lgr_diff on objects using hash (ledger_seq);
-
 -- Index for lookups by ledger hash.
 CREATE INDEX IF NOT EXISTS ledgers_ledger_hash_idx ON ledgers
     USING hash (ledger_hash);
 
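(A note on the rationale, not part of the diff: PostgreSQL must update every index on a table for each inserted row, so a hash index on objects.ledger_seq adds per-row overhead exactly where initial ingestion bulk-loads millions of objects. The change only stops the index from being created on fresh databases; an existing deployment would presumably have to drop it by hand, e.g. with a standard DROP INDEX IF EXISTS lgr_diff; statement, since no migration appears in this commit.)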
@@ -6,6 +6,10 @@ namespace Backend {
 PostgresBackend::PostgresBackend(boost::json::object const& config)
     : pgPool_(make_PgPool(config)), writeConnection_(pgPool_)
 {
+    if (config.contains("write_interval"))
+    {
+        writeInterval_ = config.at("write_interval").as_int64();
+    }
 }
 void
 PostgresBackend::writeLedger(
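For context, here is a minimal standalone sketch (not from the repository) of how the new knob is consumed: parse a JSON config with boost::json and apply the same contains()/as_int64() pattern the constructor uses. The write_interval key and the 1000000 default come from this diff; everything else is illustrative.

// Sketch only: mirrors the constructor's handling of "write_interval".
#include <boost/json.hpp>
#include <cstdint>
#include <iostream>

int main()
{
    boost::json::object config =
        boost::json::parse(R"({"write_interval": 250000})").as_object();

    std::uint32_t writeInterval = 1000000;  // default from the header diff below
    if (config.contains("write_interval"))
        writeInterval = config.at("write_interval").as_int64();

    std::cout << "flush every " << writeInterval << " objects\n";
}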
@@ -67,9 +71,13 @@ PostgresBackend::writeLedgerObject(
     numRowsInObjectsBuffer_++;
-    // If the buffer gets too large, the insert fails. Not sure why. So we
-    // insert after 1 million records
-    if (numRowsInObjectsBuffer_ % 1000000 == 0)
+    if (numRowsInObjectsBuffer_ % writeInterval_ == 0)
     {
         BOOST_LOG_TRIVIAL(info)
             << __func__ << " Flushing large buffer. num objects = "
             << numRowsInObjectsBuffer_;
         writeConnection_.bulkInsert("objects", objectsBuffer_.str());
+        BOOST_LOG_TRIVIAL(info) << __func__ << " Flushed large buffer";
         objectsBuffer_ = {};
     }
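The buffering pattern above (accumulate rows in a string buffer, bulk-insert every writeInterval_ rows, then reset the buffer) can be sketched standalone. BufferedWriter and its flush to stdout are assumptions for illustration, not the backend's actual class; the real code streams rows into objectsBuffer_ and hands the accumulated text to writeConnection_.bulkInsert.

#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>

// Illustrative stand-in for PostgresBackend's buffered object writes.
class BufferedWriter
{
    std::ostringstream buffer_;
    std::uint32_t numRows_ = 0;
    std::uint32_t writeInterval_;

public:
    explicit BufferedWriter(std::uint32_t writeInterval)
        : writeInterval_(writeInterval)
    {
    }

    void
    write(std::string const& row)
    {
        buffer_ << row << '\n';
        numRows_++;
        // As in the diff: the running count is never reset, so the
        // modulo test fires once every writeInterval_ rows.
        if (numRows_ % writeInterval_ == 0)
        {
            // Stand-in for writeConnection_.bulkInsert("objects", buffer_.str()).
            std::cout << "bulk insert, " << numRows_ << " rows so far\n";
            buffer_ = {};  // reset the buffer, as the commit does
        }
    }
};

int main()
{
    BufferedWriter writer{3};  // tiny interval so the demo flushes twice
    for (int i = 0; i < 7; ++i)
        writer.write("key\tvalue");
}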
@@ -16,6 +16,7 @@ private:
     mutable PgQuery writeConnection_;
     mutable bool abortWrite_ = false;
     mutable boost::asio::thread_pool pool_{200};
+    uint32_t writeInterval_ = 1000000;
 
 public:
     PostgresBackend(boost::json::object const& config);
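Taken together, writeInterval_ defaults to 1,000,000, so deployments that leave write_interval unset keep the previous flush cadence; setting the key in the config overrides the batch size through the constructor change shown above.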