Drop the hash index on objects.ledger_sequence; it was making initial ingestion slow.

This commit is contained in:
CJ Cobb
2021-04-28 18:44:21 +00:00
parent 40d136b3fc
commit 0cfce33724
3 changed files with 10 additions and 3 deletions

View File

@@ -759,8 +759,6 @@ create table if not exists objects6 partition of objects for values from (500000
create table if not exists objects7 partition of objects for values from (60000000) to (70000000); create table if not exists objects7 partition of objects for values from (60000000) to (70000000);
create index if not exists lgr_diff on objects using hash (ledger_seq);
-- Index for lookups by ledger hash. -- Index for lookups by ledger hash.
CREATE INDEX IF NOT EXISTS ledgers_ledger_hash_idx ON ledgers CREATE INDEX IF NOT EXISTS ledgers_ledger_hash_idx ON ledgers
USING hash (ledger_hash); USING hash (ledger_hash);

View File

@@ -6,6 +6,10 @@ namespace Backend {
PostgresBackend::PostgresBackend(boost::json::object const& config) PostgresBackend::PostgresBackend(boost::json::object const& config)
: pgPool_(make_PgPool(config)), writeConnection_(pgPool_) : pgPool_(make_PgPool(config)), writeConnection_(pgPool_)
{ {
if (config.contains("write_interval"))
{
writeInterval_ = config.at("write_interval").as_int64();
}
} }
void void
PostgresBackend::writeLedger( PostgresBackend::writeLedger(
@@ -67,9 +71,13 @@ PostgresBackend::writeLedgerObject(
numRowsInObjectsBuffer_++; numRowsInObjectsBuffer_++;
// If the buffer gets too large, the insert fails. Not sure why. So we // If the buffer gets too large, the insert fails. Not sure why. So we
// insert after 1 million records // insert after 1 million records
if (numRowsInObjectsBuffer_ % 1000000 == 0) if (numRowsInObjectsBuffer_ % writeInterval_ == 0)
{ {
BOOST_LOG_TRIVIAL(info)
<< __func__ << " Flushing large buffer. num objects = "
<< numRowsInObjectsBuffer_;
writeConnection_.bulkInsert("objects", objectsBuffer_.str()); writeConnection_.bulkInsert("objects", objectsBuffer_.str());
BOOST_LOG_TRIVIAL(info) << __func__ << " Flushed large buffer";
objectsBuffer_ = {}; objectsBuffer_ = {};
} }

View File

@@ -16,6 +16,7 @@ private:
mutable PgQuery writeConnection_; mutable PgQuery writeConnection_;
mutable bool abortWrite_ = false; mutable bool abortWrite_ = false;
mutable boost::asio::thread_pool pool_{200}; mutable boost::asio::thread_pool pool_{200};
uint32_t writeInterval_ = 1000000;
public: public:
PostgresBackend(boost::json::object const& config); PostgresBackend(boost::json::object const& config);