feat: Read and write LedgerCache to file (#2761)

Fixes #2413.
This commit is contained in:
Sergey Kuznetsov
2025-11-13 17:01:40 +00:00
committed by GitHub
parent c6308ce036
commit 346c9f9bdf
35 changed files with 2725 additions and 26 deletions

View File

@@ -29,6 +29,7 @@
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <vector>
struct MockLedgerCache : data::LedgerCacheInterface {
@@ -77,4 +78,12 @@ struct MockLedgerCache : data::LedgerCacheInterface {
MOCK_METHOD(float, getSuccessorHitRate, (), (const, override));
MOCK_METHOD(void, waitUntilCacheContainsSeq, (uint32_t), (override));
using LoadFromFileReturnType = std::expected<void, std::string>;
MOCK_METHOD(
LoadFromFileReturnType,
loadFromFile,
(std::string const& path, uint32_t minLatestSequence),
(override)
);
};

View File

@@ -19,10 +19,13 @@
#pragma once
#include <fmt/format.h>
#include <cstdio>
#include <filesystem>
#include <fstream>
#include <ios>
#include <iostream>
#include <string>
#include <string_view>
#include <utility>

View File

@@ -16,6 +16,9 @@ target_sources(
data/cassandra/LedgerHeaderCacheTests.cpp
data/cassandra/RetryPolicyTests.cpp
data/cassandra/SettingsProviderTests.cpp
data/impl/InputFileTests.cpp
data/impl/LedgerCacheFileTests.cpp
data/impl/OutputFileTests.cpp
# Cluster
cluster/ClioNodeTests.cpp
cluster/ClusterCommunicationServiceTests.cpp

View File

@@ -18,11 +18,18 @@
//==============================================================================
#include "data/LedgerCache.hpp"
#include "etl/Models.hpp"
#include "util/MockPrometheus.hpp"
#include "util/TmpFile.hpp"
#include "util/prometheus/Bool.hpp"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <xrpl/basics/base_uint.h>
#include <cstdint>
#include <string>
#include <vector>
using namespace data;
@@ -65,3 +72,173 @@ TEST_F(LedgerCachePrometheusMetricTest, setFull)
EXPECT_CALL(fullMock, value()).WillOnce(testing::Return(1));
EXPECT_TRUE(cache.isFull());
}
// Fixture for save/load round-trip tests: provides two cache objects with
// distinct keys/payloads and a fixed ledger sequence.
struct LedgerCacheSaveLoadTest : LedgerCacheTest {
    ripple::uint256 const key1{1};
    ripple::uint256 const key2{2};
    // Raw/successor/predecessor/type fields are left empty because these
    // tests only exercise key and data persistence.
    std::vector<etl::model::Object> const objs{
        etl::model::Object{
            .key = key1,
            .keyRaw = {},
            .data = {1, 2, 3, 4, 5},
            .dataRaw = {},
            .successor = {},
            .predecessor = {},
            .type = {}
        },
        etl::model::Object{
            .key = key2,
            .keyRaw = {},
            .data = {6, 7, 8, 9, 10},
            .dataRaw = {},
            .successor = {},
            .predecessor = {},
            .type = {}
        }
    };
    // Sequence at which objs are inserted into the cache.
    uint32_t const kLEDGER_SEQ = 100;
};
// saveToFile() must refuse to persist a cache that was never marked full.
TEST_F(LedgerCacheSaveLoadTest, saveToFileFailsWhenCacheNotFull)
{
    auto const outputFile = TmpFile::empty();
    ASSERT_FALSE(cache.isFull());

    auto const saveResult = cache.saveToFile(outputFile.path);
    ASSERT_FALSE(saveResult.has_value());
    EXPECT_EQ(saveResult.error(), "Ledger cache is not full");
}
// Round-trip: a full cache written to disk loads back into a fresh cache
// with identical contents and the same latest ledger sequence.
TEST_F(LedgerCacheSaveLoadTest, saveAndLoadFromFile)
{
    cache.update(objs, kLEDGER_SEQ);
    cache.setFull();
    ASSERT_TRUE(cache.isFull());
    EXPECT_EQ(cache.size(), 2u);
    EXPECT_EQ(cache.latestLedgerSequence(), kLEDGER_SEQ);

    // Capture the original blobs so the reload can be compared against them.
    auto const originalFirst = cache.get(key1, kLEDGER_SEQ);
    ASSERT_TRUE(originalFirst.has_value());
    EXPECT_EQ(originalFirst.value(), objs.front().data);

    auto const originalSecond = cache.get(key2, kLEDGER_SEQ);
    ASSERT_TRUE(originalSecond.has_value());
    EXPECT_EQ(originalSecond.value(), objs.back().data);

    auto const outputFile = TmpFile::empty();
    auto const saveResult = cache.saveToFile(outputFile.path);
    ASSERT_TRUE(saveResult.has_value()) << "Save failed: " << saveResult.error();

    LedgerCache restored;
    auto const loadResult = restored.loadFromFile(outputFile.path, 0);
    ASSERT_TRUE(loadResult.has_value()) << "Load failed: " << loadResult.error();

    EXPECT_TRUE(restored.isFull());
    EXPECT_EQ(restored.size(), 2u);
    EXPECT_EQ(restored.latestLedgerSequence(), kLEDGER_SEQ);

    auto const restoredFirst = restored.get(key1, kLEDGER_SEQ);
    ASSERT_TRUE(restoredFirst.has_value());
    EXPECT_EQ(restoredFirst.value(), originalFirst);

    auto const restoredSecond = restored.get(key2, kLEDGER_SEQ);
    ASSERT_TRUE(restoredSecond.has_value());
    EXPECT_EQ(restoredSecond.value(), originalSecond);

    EXPECT_EQ(restored.latestLedgerSequence(), cache.latestLedgerSequence());
}
// Round-trip including a deletion: an object deleted at kLEDGER_SEQ must
// remain retrievable via getDeleted() after save + load.
TEST_F(LedgerCacheSaveLoadTest, saveAndLoadFromFileWithDeletedObjects)
{
    // Insert both objects one sequence earlier, then delete the first one
    // (empty data marks deletion) at kLEDGER_SEQ.
    cache.update(objs, kLEDGER_SEQ - 1);
    auto objsCopy = objs;
    objsCopy.front().data = {};
    cache.update(objsCopy, kLEDGER_SEQ);
    cache.setFull();

    // Verify deleted object is accessible via getDeleted
    auto const blob1 = cache.get(key1, kLEDGER_SEQ);
    ASSERT_FALSE(blob1.has_value());
    auto const blob2 = cache.get(key2, kLEDGER_SEQ);
    ASSERT_TRUE(blob2.has_value());
    EXPECT_EQ(blob2.value(), objs.back().data);
    auto const deletedBlob = cache.getDeleted(key1, kLEDGER_SEQ - 1);
    ASSERT_TRUE(deletedBlob.has_value());
    EXPECT_EQ(deletedBlob.value(), objs.front().data);

    // Save and load
    auto const tmpFile = TmpFile::empty();
    auto saveResult = cache.saveToFile(tmpFile.path);
    ASSERT_TRUE(saveResult.has_value()) << "Save failed: " << saveResult.error();
    LedgerCache newCache;
    auto loadResult = newCache.loadFromFile(tmpFile.path, 0);
    ASSERT_TRUE(loadResult.has_value()) << "Load failed: " << loadResult.error();

    // Verify deleted object is preserved
    auto const loadedDeletedBlob = newCache.getDeleted(key1, kLEDGER_SEQ - 1);
    ASSERT_TRUE(loadedDeletedBlob.has_value());
    EXPECT_EQ(loadedDeletedBlob.value(), deletedBlob);

    // Verify active object
    auto const loadedBlob1 = newCache.get(key1, kLEDGER_SEQ);
    ASSERT_FALSE(loadedBlob1.has_value());
    auto const loadedBlob2 = newCache.get(key2, kLEDGER_SEQ);
    ASSERT_TRUE(loadedBlob2.has_value());
    EXPECT_EQ(loadedBlob2.value(), blob2);
    EXPECT_TRUE(newCache.isFull());
    EXPECT_EQ(newCache.latestLedgerSequence(), cache.latestLedgerSequence());
}
// Saving to "/" must fail with a non-empty error.
// NOTE(review): the test name says "permissions", but on most systems the
// failure cause is that "/" is a directory, not a permissions error —
// consider renaming for accuracy.
TEST_F(LedgerCacheTest, SaveFailedDueToFilePermissions)
{
    cache.setFull();
    auto const result = cache.saveToFile("/");
    ASSERT_FALSE(result.has_value());
    EXPECT_FALSE(result.error().empty());
}
// Loading from a path that does not exist must yield a non-empty error.
TEST_F(LedgerCacheTest, loadFromNonExistentFileReturnsError)
{
    auto const loadResult = cache.loadFromFile("/nonexistent/path/cache.dat", 0);
    ASSERT_FALSE(loadResult.has_value());
    EXPECT_FALSE(loadResult.error().empty());
}
// Loading must be refused when the file's stored sequence is below the
// caller's required minimum.
TEST_F(LedgerCacheSaveLoadTest, RejectOldCacheFile)
{
    uint32_t const cacheSeq = 100;
    cache.update(objs, cacheSeq);
    cache.setFull();

    auto const tmpFile = TmpFile::empty();
    auto const saveResult = cache.saveToFile(tmpFile.path);
    ASSERT_TRUE(saveResult.has_value());

    LedgerCache newCache;
    auto const loadResult = newCache.loadFromFile(tmpFile.path, cacheSeq + 1);
    // ASSERT (not EXPECT): calling error() on a std::expected that holds a
    // value is undefined behavior, so the test must not continue if the load
    // unexpectedly succeeded.
    ASSERT_FALSE(loadResult.has_value());
    EXPECT_THAT(loadResult.error(), ::testing::HasSubstr("too low"));
}
// A cache file whose stored sequence meets the required minimum loads fine.
TEST_F(LedgerCacheSaveLoadTest, AcceptRecentCacheFile)
{
    uint32_t const storedSeq = 100;
    cache.update(objs, storedSeq);
    cache.setFull();

    auto const outputFile = TmpFile::empty();
    auto const saveResult = cache.saveToFile(outputFile.path);
    ASSERT_TRUE(saveResult.has_value());

    LedgerCache restored;
    auto const loadResult = restored.loadFromFile(outputFile.path, storedSeq - 1);
    ASSERT_TRUE(loadResult.has_value());
    EXPECT_EQ(restored.latestLedgerSequence(), storedSeq);
}

View File

@@ -0,0 +1,196 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include "data/impl/InputFile.hpp"
#include "util/Shasum.hpp"
#include "util/TmpFile.hpp"
#include <gtest/gtest.h>
#include <xrpl/basics/base_uint.h>
#include <cstdint>
#include <string>
#include <vector>
using namespace data::impl;
// Empty fixture: groups the InputFile tests under one suite name.
struct InputFileTest : ::testing::Test {};
// An InputFile constructed over an existing file reports itself as open.
TEST_F(InputFileTest, ConstructorWithValidFile)
{
    auto const file = TmpFile{"Hello, World!"};
    InputFile input(file.path);
    EXPECT_TRUE(input.isOpen());
}
// A missing path leaves the InputFile closed, and all reads on it fail.
TEST_F(InputFileTest, ConstructorWithInvalidFile)
{
    InputFile input("/nonexistent/path/file.txt");
    EXPECT_FALSE(input.isOpen());

    char byte = 0;
    EXPECT_FALSE(input.read(byte));
    EXPECT_FALSE(input.readRaw(&byte, 1));
}
// readRaw() returns exactly the bytes that were written to the file.
TEST_F(InputFileTest, ReadRawFromFile)
{
    std::string const expected = "Test content for reading";
    auto file = TmpFile{expected};
    InputFile input(file.path);
    ASSERT_TRUE(input.isOpen());

    std::vector<char> bytes(expected.size());
    EXPECT_TRUE(input.readRaw(bytes.data(), bytes.size()));
    EXPECT_EQ(std::string(bytes.data(), bytes.size()), expected);
}
// Successive readRaw() calls consume the file sequentially: each call picks
// up where the previous one left off.
TEST_F(InputFileTest, ReadRawFromFilePartial)
{
    std::string content = "Hello, World!";
    auto tmpFile = TmpFile{content};
    InputFile inputFile(tmpFile.path);
    ASSERT_TRUE(inputFile.isOpen());

    // First 3 bytes.
    std::vector<char> buffer(3);
    EXPECT_TRUE(inputFile.readRaw(buffer.data(), buffer.size()));
    EXPECT_EQ(std::string(buffer.data(), buffer.size()), "Hel"); // codespell:ignore

    // Next 6 bytes.
    buffer.resize(6);
    EXPECT_TRUE(inputFile.readRaw(buffer.data(), buffer.size()));
    EXPECT_EQ(std::string(buffer.data(), buffer.size()), "lo, Wo");

    // Remaining 4 bytes.
    buffer.resize(4);
    EXPECT_TRUE(inputFile.readRaw(buffer.data(), buffer.size()));
    EXPECT_EQ(std::string(buffer.data(), buffer.size()), "rld!");
}
// Once the whole file has been consumed, any further read must fail.
TEST_F(InputFileTest, ReadRawAfterEnd)
{
    std::string const content = "Test";
    auto file = TmpFile{content};
    InputFile input(file.path);
    ASSERT_TRUE(input.isOpen());

    std::vector<char> bytes(content.size());
    EXPECT_TRUE(input.readRaw(bytes.data(), bytes.size()));

    char trailing = 0;
    EXPECT_FALSE(input.readRaw(&trailing, 1));
}
// Requesting more bytes than the file contains fails the whole read.
TEST_F(InputFileTest, ReadRawFromFileExceedsSize)
{
    auto file = TmpFile{"Test"};
    InputFile input(file.path);
    ASSERT_TRUE(input.isOpen());

    std::vector<char> oversized(10);  // larger than the 4-byte file
    EXPECT_FALSE(input.readRaw(oversized.data(), oversized.size()));
}
// read<T>() fills a trivially-copyable value from the file's raw bytes.
TEST_F(InputFileTest, ReadTemplateMethod)
{
    auto tmpFile = TmpFile{"\x01\x02\x03\x04"};
    InputFile inputFile(tmpFile.path);
    ASSERT_TRUE(inputFile.isOpen());

    std::uint32_t value{0};
    bool success = inputFile.read(value);
    EXPECT_TRUE(success);
    // Note: The actual value depends on endianness — with these bytes it is
    // nonzero either way, so only inequality is asserted.
    EXPECT_NE(value, 0u);
}
// read<T>() fails when fewer than sizeof(T) bytes remain in the file.
TEST_F(InputFileTest, ReadTemplateMethodFailure)
{
    auto file = TmpFile{"Hi"};  // only 2 bytes available
    InputFile input(file.path);
    ASSERT_TRUE(input.isOpen());

    std::uint64_t eightByteValue{0};  // needs 8 bytes
    EXPECT_FALSE(input.read(eightByteValue));
}
// Reading even a single byte from an empty file fails.
TEST_F(InputFileTest, ReadFromEmptyFile)
{
    auto file = TmpFile::empty();
    InputFile input(file.path);
    ASSERT_TRUE(input.isOpen());

    char single = 0;
    EXPECT_FALSE(input.readRaw(&single, 1));
}
// Before any bytes are read, the running hash equals sha256 of "".
TEST_F(InputFileTest, HashOfEmptyFile)
{
    auto file = TmpFile::empty();
    InputFile input(file.path);
    ASSERT_TRUE(input.isOpen());

    EXPECT_EQ(input.hash(), util::sha256sum(""));
}
// After reading the whole file, hash() equals sha256 of the full content;
// before any read it equals sha256 of the empty string.
TEST_F(InputFileTest, HashAfterReading)
{
    std::string const content = "Hello, World!";
    auto tmpFile = TmpFile{content};
    InputFile inputFile(tmpFile.path);
    ASSERT_TRUE(inputFile.isOpen());

    // Nothing read yet.
    EXPECT_EQ(inputFile.hash(), util::sha256sum(""));

    std::vector<char> buffer(content.size());
    EXPECT_TRUE(inputFile.readRaw(buffer.data(), buffer.size()));
    EXPECT_EQ(std::string(buffer.data(), buffer.size()), content);

    // Entire file consumed: hash covers all bytes.
    EXPECT_EQ(inputFile.hash(), util::sha256sum(content));
}
// The running hash covers exactly the prefix of the file read so far.
TEST_F(InputFileTest, HashProgressesWithReading)
{
    std::string const content = "Hello, World!";
    auto file = TmpFile{content};
    InputFile input(file.path);
    ASSERT_TRUE(input.isOpen());
    EXPECT_EQ(input.hash(), util::sha256sum(""));

    // After the first 5 bytes the hash covers only "Hello".
    std::vector<char> firstChunk(5);
    EXPECT_TRUE(input.readRaw(firstChunk.data(), firstChunk.size()));
    EXPECT_EQ(input.hash(), util::sha256sum("Hello"));

    // After the remaining 8 bytes the hash covers the whole content.
    std::vector<char> secondChunk(8);
    EXPECT_TRUE(input.readRaw(secondChunk.data(), secondChunk.size()));
    EXPECT_EQ(input.hash(), util::sha256sum(content));
}

View File

@@ -0,0 +1,723 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include "data/LedgerCache.hpp"
#include "data/impl/LedgerCacheFile.hpp"
#include "util/NameGenerator.hpp"
#include "util/TmpFile.hpp"
#include <fmt/format.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <xrpl/basics/base_uint.h>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <filesystem>
#include <fstream>
#include <ios>
#include <limits>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
using namespace data::impl;
// Shared fixture for LedgerCacheFile tests: generates synthetic cache data,
// computes the byte offset of every field in the serialized file, and can
// corrupt individual bytes to exercise the reader's error paths.
struct LedgerCacheFileTestBase : ::testing::Test {
    // Sizing knobs for generated test data; description doubles as the
    // parametrized test name.
    struct DataSizeParams {
        size_t mapEntries;
        size_t deletedEntries;
        size_t blobSize;
        std::string description;
    };
    // Every kind of byte-level corruption the tests inject.
    enum class CorruptionType {
        InvalidVersion,
        CorruptedSeparator1,  // After header
        CorruptedSeparator2,  // After map hash
        CorruptedSeparator3,  // After deleted hash
        MapKeyCorrupted,
        MapSeqCorrupted,
        MapBlobSizeCorrupted,
        MapBlobDataCorrupted,
        DeletedKeyCorrupted,
        DeletedSeqCorrupted,
        DeletedBlobSizeCorrupted,
        DeletedBlobDataCorrupted,
        HeaderLatestSeqCorrupted
    };
    struct CorruptionParams {
        CorruptionType type;
        std::string description;
    };
    // Byte offsets of the four fields of one serialized entry.
    struct EntryOffsets {
        size_t keyOffset;
        size_t seqOffset;
        size_t blobSizeOffset;
        size_t blobDataOffset;
    };
    // Byte offsets of every section of the serialized file. The constants
    // below (16-byte separators, 32-byte keys, 4-byte seq, 8-byte blob size)
    // mirror LedgerCacheFile's on-disk layout — keep them in sync with the
    // writer if the format changes.
    struct FileOffsets {
        size_t headerOffset;
        size_t separator1Offset;
        size_t mapStartOffset;
        std::vector<EntryOffsets> mapEntries;
        size_t separator2Offset;
        size_t deletedStartOffset;
        std::vector<EntryOffsets> deletedEntries;
        size_t separator3Offset;
        size_t hashOffset;

        // Walk the layout for the given data and record each field's offset.
        static FileOffsets
        calculate(LedgerCacheFile::DataView const& dataView)
        {
            FileOffsets offsets{};
            size_t currentOffset = 0;
            offsets.headerOffset = currentOffset;
            currentOffset += sizeof(LedgerCacheFile::Header);
            offsets.separator1Offset = currentOffset;
            currentOffset += 16;
            // Map entries
            offsets.mapStartOffset = currentOffset;
            for (auto const& [key, entry] : dataView.map) {
                EntryOffsets entryOffsets{};
                entryOffsets.keyOffset = currentOffset;
                entryOffsets.seqOffset = currentOffset + 32;              // uint256 size
                entryOffsets.blobSizeOffset = currentOffset + 32 + 4;     // + uint32 size
                entryOffsets.blobDataOffset = currentOffset + 32 + 4 + 8; // + size_t size
                offsets.mapEntries.push_back(entryOffsets);
                currentOffset += 32 + 4 + 8 + entry.blob.size(); // key + seq + size + blob
            }
            // Separator 2 (after map entries)
            offsets.separator2Offset = currentOffset;
            currentOffset += 16;
            // Deleted entries
            offsets.deletedStartOffset = currentOffset;
            for (auto const& [key, entry] : dataView.deleted) {
                EntryOffsets entryOffsets{};
                entryOffsets.keyOffset = currentOffset;
                entryOffsets.seqOffset = currentOffset + 32;
                entryOffsets.blobSizeOffset = currentOffset + 32 + 4;
                entryOffsets.blobDataOffset = currentOffset + 32 + 4 + 8;
                offsets.deletedEntries.push_back(entryOffsets);
                currentOffset += 32 + 4 + 8 + entry.blob.size();
            }
            // Separator 3 (after deleted entries)
            offsets.separator3Offset = currentOffset;
            currentOffset += 16;
            // Overall file hash
            offsets.hashOffset = currentOffset;
            return offsets;
        }
    };

    // Remove the "<path>.new" temp file a failed write may leave behind.
    ~LedgerCacheFileTestBase() override
    {
        auto const pathWithNewPrefix = fmt::format("{}.new", tmpFile.path);
        if (std::filesystem::exists(pathWithNewPrefix))
            std::filesystem::remove(pathWithNewPrefix);
    }

    static std::vector<DataSizeParams> const kDATA_SIZE_PARAMS;
    static std::vector<CorruptionParams> const kCORRUPTION_PARAMS;

    TmpFile tmpFile = TmpFile::empty();
    static uint32_t constexpr kLATEST_SEQUENCE = 12345;

    // Build deterministic data: keys/blobs are filled with patterns derived
    // from the entry index so round-trips can be verified exactly.
    static LedgerCacheFile::Data
    createTestData(size_t mapSize, size_t deletedSize, size_t blobSize)
    {
        LedgerCacheFile::Data data;
        data.latestSeq = kLATEST_SEQUENCE;
        for (size_t i = 0; i < mapSize; ++i) {
            ripple::uint256 key;
            std::memset(key.data(), static_cast<int>(i), ripple::uint256::size());
            data::LedgerCache::CacheEntry entry;
            entry.seq = static_cast<uint32_t>(1000 + i);
            entry.blob.resize(blobSize);
            std::memset(entry.blob.data(), static_cast<int>(i + 100), blobSize);
            data.map.emplace(key, std::move(entry));
        }
        // Deleted entries use disjoint key/seq/fill ranges from the map above.
        for (size_t i = 0; i < deletedSize; ++i) {
            ripple::uint256 key;
            std::memset(key.data(), static_cast<int>(i + 200), ripple::uint256::size());
            data::LedgerCache::CacheEntry entry;
            entry.seq = static_cast<uint32_t>(2000 + i);
            entry.blob.resize(blobSize);
            std::memset(entry.blob.data(), static_cast<int>(i + 250), blobSize);
            data.deleted.emplace(key, std::move(entry));
        }
        return data;
    }

    // Build a non-owning view over Data, as write() expects.
    static LedgerCacheFile::DataView
    toDataView(LedgerCacheFile::Data const& data)
    {
        return LedgerCacheFile::DataView{.latestSeq = data.latestSeq, .map = data.map, .deleted = data.deleted};
    }

    // Overwrite the specific byte(s) corresponding to `type` in tmpFile.
    // dataView must be the same data the file was written from, so that the
    // computed offsets match the file contents.
    void
    corruptFile(CorruptionType type, LedgerCacheFile::DataView const& dataView) const
    {
        std::fstream file(tmpFile.path, std::ios::in | std::ios::out | std::ios::binary);
        ASSERT_TRUE(file.is_open());
        auto const offsets = FileOffsets::calculate(dataView);
        switch (type) {
            case CorruptionType::InvalidVersion:
                file.seekp(offsets.headerOffset);
                {
                    uint32_t invalidVersion = 999;
                    file.write(reinterpret_cast<char const*>(&invalidVersion), sizeof(invalidVersion));
                }
                break;
            case CorruptionType::CorruptedSeparator1:
                file.seekp(offsets.separator1Offset);
                {
                    char corruptByte = static_cast<char>(0xFF);
                    file.write(&corruptByte, 1);
                }
                break;
            case CorruptionType::CorruptedSeparator2:
                file.seekp(offsets.separator2Offset);
                {
                    char corruptByte = static_cast<char>(0xFF);
                    file.write(&corruptByte, 1);
                }
                break;
            case CorruptionType::CorruptedSeparator3:
                file.seekp(offsets.separator3Offset);
                {
                    char corruptByte = static_cast<char>(0xFF);
                    file.write(&corruptByte, 1);
                }
                break;
            case CorruptionType::MapKeyCorrupted:
                if (!offsets.mapEntries.empty()) {
                    file.seekp(offsets.mapEntries[0].keyOffset);
                    char corruptByte = static_cast<char>(0xFF);
                    file.write(&corruptByte, 1);
                }
                break;
            case CorruptionType::MapSeqCorrupted:
                if (!offsets.mapEntries.empty()) {
                    file.seekp(offsets.mapEntries[0].seqOffset);
                    uint32_t const corruptSeq = std::numeric_limits<uint32_t>::max();
                    file.write(reinterpret_cast<char const*>(&corruptSeq), sizeof(corruptSeq));
                }
                break;
            case CorruptionType::MapBlobSizeCorrupted:
                if (!offsets.mapEntries.empty()) {
                    file.seekp(offsets.mapEntries[0].blobSizeOffset);
                    size_t const corruptSize = std::numeric_limits<size_t>::max();
                    file.write(reinterpret_cast<char const*>(&corruptSize), sizeof(corruptSize));
                }
                break;
            case CorruptionType::MapBlobDataCorrupted:
                if (!offsets.mapEntries.empty() && !dataView.map.begin()->second.blob.empty()) {
                    file.seekp(offsets.mapEntries[0].blobDataOffset);
                    char corruptByte = static_cast<char>(0xFF);
                    file.write(&corruptByte, 1);
                }
                break;
            case CorruptionType::DeletedKeyCorrupted:
                if (!offsets.deletedEntries.empty()) {
                    file.seekp(offsets.deletedEntries[0].keyOffset);
                    char corruptByte = static_cast<char>(0xFF);
                    file.write(&corruptByte, 1);
                }
                break;
            case CorruptionType::DeletedSeqCorrupted:
                if (!offsets.deletedEntries.empty()) {
                    file.seekp(offsets.deletedEntries[0].seqOffset);
                    uint32_t const corruptSeq = std::numeric_limits<uint32_t>::max();
                    file.write(reinterpret_cast<char const*>(&corruptSeq), sizeof(corruptSeq));
                }
                break;
            case CorruptionType::DeletedBlobSizeCorrupted:
                if (!offsets.deletedEntries.empty()) {
                    file.seekp(offsets.deletedEntries[0].blobSizeOffset);
                    size_t const corruptSize = std::numeric_limits<size_t>::max();
                    file.write(reinterpret_cast<char const*>(&corruptSize), sizeof(corruptSize));
                }
                break;
            case CorruptionType::DeletedBlobDataCorrupted:
                if (!offsets.deletedEntries.empty() && !dataView.deleted.begin()->second.blob.empty()) {
                    file.seekp(offsets.deletedEntries[0].blobDataOffset);
                    char corruptByte = static_cast<char>(0xFF);
                    file.write(&corruptByte, 1);
                }
                break;
            case CorruptionType::HeaderLatestSeqCorrupted:
                file.seekp(offsets.headerOffset + sizeof(uint32_t)); // skip version
                {
                    uint32_t corruptSeq = 0; // set to 0 to fail validation if minLatestSequence > 0
                    file.write(reinterpret_cast<char const*>(&corruptSeq), sizeof(corruptSeq));
                }
                break;
        }
    }

    // Deep-compare two Data instances (sequence, map and deleted contents).
    static void
    verifyDataEquals(LedgerCacheFile::Data const& expected, LedgerCacheFile::Data const& actual)
    {
        EXPECT_EQ(expected.latestSeq, actual.latestSeq);
        EXPECT_EQ(expected.map.size(), actual.map.size());
        EXPECT_EQ(expected.deleted.size(), actual.deleted.size());
        for (auto const& [key, entry] : expected.map) {
            auto it = actual.map.find(key);
            ASSERT_NE(it, actual.map.end()) << "Key not found in actual map";
            EXPECT_EQ(entry.seq, it->second.seq);
            EXPECT_EQ(entry.blob, it->second.blob);
        }
        for (auto const& [key, entry] : expected.deleted) {
            auto it = actual.deleted.find(key);
            ASSERT_NE(it, actual.deleted.end()) << "Key not found in actual deleted";
            EXPECT_EQ(entry.seq, it->second.seq);
            EXPECT_EQ(entry.blob, it->second.blob);
        }
    }
};
// Data-size combinations covered by the round-trip tests, from an empty
// cache up to many entries with ~50 KB blobs.
std::vector<LedgerCacheFileTestBase::DataSizeParams> const LedgerCacheFileTestBase::kDATA_SIZE_PARAMS = {
    {.mapEntries = 0, .deletedEntries = 0, .blobSize = 0, .description = "empty"},
    {.mapEntries = 1, .deletedEntries = 0, .blobSize = 10, .description = "single_map_small_blob"},
    {.mapEntries = 0, .deletedEntries = 1, .blobSize = 100, .description = "single_deleted_medium_blob"},
    {.mapEntries = 5, .deletedEntries = 3, .blobSize = 1000, .description = "multiple_entries_large_blob"},
    {.mapEntries = 10, .deletedEntries = 10, .blobSize = 50000, .description = "many_entries_huge_blob"}
};
// One parametrized case per CorruptionType; descriptions become test names.
std::vector<LedgerCacheFileTestBase::CorruptionParams> const LedgerCacheFileTestBase::kCORRUPTION_PARAMS = {
    {.type = CorruptionType::InvalidVersion, .description = "invalid_version"},
    {.type = CorruptionType::CorruptedSeparator1, .description = "corrupted_separator1"},
    {.type = CorruptionType::CorruptedSeparator2, .description = "corrupted_separator2"},
    {.type = CorruptionType::CorruptedSeparator3, .description = "corrupted_separator3"},
    {.type = CorruptionType::MapKeyCorrupted, .description = "map_key_corrupted"},
    {.type = CorruptionType::MapSeqCorrupted, .description = "map_seq_corrupted"},
    {.type = CorruptionType::MapBlobSizeCorrupted, .description = "map_blob_size_corrupted"},
    {.type = CorruptionType::MapBlobDataCorrupted, .description = "map_blob_data_corrupted"},
    {.type = CorruptionType::DeletedKeyCorrupted, .description = "deleted_key_corrupted"},
    {.type = CorruptionType::DeletedSeqCorrupted, .description = "deleted_seq_corrupted"},
    {.type = CorruptionType::DeletedBlobSizeCorrupted, .description = "deleted_blob_size_corrupted"},
    {.type = CorruptionType::DeletedBlobDataCorrupted, .description = "deleted_blob_data_corrupted"},
    {.type = CorruptionType::HeaderLatestSeqCorrupted, .description = "header_latest_seq_corrupted"}
};
// Round-trip test suite parametrized over data sizes.
struct LedgerCacheFileTest : LedgerCacheFileTestBase,
                             ::testing::WithParamInterface<LedgerCacheFileTestBase::DataSizeParams> {
    // Turn a param into a valid gtest test name.
    static std::string
    roundTripParamName(::testing::TestParamInfo<DataSizeParams> const& info)
    {
        return info.param.description;
    }
};
// Instantiate the round-trip suite for every data-size combination.
INSTANTIATE_TEST_SUITE_P(
    AllDataSizes,
    LedgerCacheFileTest,
    ::testing::ValuesIn(LedgerCacheFileTestBase::kDATA_SIZE_PARAMS),
    LedgerCacheFileTest::roundTripParamName
);
// Round-trip: whatever is written through a DataView comes back via read().
TEST_P(LedgerCacheFileTest, WriteAndReadData)
{
    auto const params = GetParam();
    LedgerCacheFile cacheFile(tmpFile.path);

    auto data = createTestData(params.mapEntries, params.deletedEntries, params.blobSize);
    auto view = toDataView(data);

    auto writeResult = cacheFile.write(view);
    ASSERT_TRUE(writeResult.has_value()) << "Failed to write: " << writeResult.error();
    EXPECT_TRUE(std::filesystem::exists(tmpFile.path));
    EXPECT_GT(std::filesystem::file_size(tmpFile.path), 0u);

    auto readBack = cacheFile.read(0);
    ASSERT_TRUE(readBack.has_value()) << "Failed to read: " << readBack.error();
    verifyDataEquals(data, readBack.value());
}
// Corruption test suite parametrized over CorruptionType.
struct LedgerCacheFileCorruptionTest : LedgerCacheFileTestBase,
                                       ::testing::WithParamInterface<LedgerCacheFileTestBase::CorruptionParams> {
    // Turn a param into a valid gtest test name.
    static std::string
    corruptionParamName(::testing::TestParamInfo<CorruptionParams> const& info)
    {
        return info.param.description;
    }
};
// Instantiate the corruption suite for every corruption kind.
INSTANTIATE_TEST_SUITE_P(
    AllCorruptions,
    LedgerCacheFileCorruptionTest,
    ::testing::ValuesIn(LedgerCacheFileTestBase::kCORRUPTION_PARAMS),
    LedgerCacheFileCorruptionTest::corruptionParamName
);
// Each kind of byte corruption must make read() fail with an appropriate
// error message.
TEST_P(LedgerCacheFileCorruptionTest, HandleCorruption)
{
    auto corruptionParams = GetParam();
    LedgerCacheFile cacheFile(tmpFile.path);
    auto testData = createTestData(3, 2, 100);
    auto dataView = toDataView(testData);
    auto writeResult = cacheFile.write(dataView);
    ASSERT_TRUE(writeResult.has_value()) << "Failed to write: " << writeResult.error();

    corruptFile(corruptionParams.type, dataView);

    auto readResult = cacheFile.read(0);
    // ASSERT (not EXPECT): calling error() on a std::expected that holds a
    // value is undefined behavior, so we must stop here if the read of the
    // corrupted file unexpectedly succeeded.
    ASSERT_FALSE(readResult.has_value()) << "Should have failed to read corrupted file";
    std::string const& error = readResult.error();

    // Identical expectations for map/deleted variants are merged into shared
    // case labels to avoid duplicated blocks.
    switch (corruptionParams.type) {
        case CorruptionType::InvalidVersion:
            EXPECT_THAT(error, ::testing::HasSubstr("wrong version"));
            break;
        case CorruptionType::CorruptedSeparator1:
        case CorruptionType::CorruptedSeparator2:
        case CorruptionType::CorruptedSeparator3:
            EXPECT_THAT(error, ::testing::HasSubstr("Separator verification failed"));
            break;
        case CorruptionType::MapKeyCorrupted:
        case CorruptionType::MapSeqCorrupted:
        case CorruptionType::DeletedKeyCorrupted:
        case CorruptionType::DeletedSeqCorrupted:
            // Exact message varies; only require that a reason is reported.
            EXPECT_FALSE(error.empty());
            break;
        case CorruptionType::MapBlobSizeCorrupted:
        case CorruptionType::DeletedBlobSizeCorrupted:
            EXPECT_THAT(
                error,
                ::testing::AnyOf(
                    ::testing::HasSubstr("Error reading cache file"),
                    ::testing::HasSubstr("Failed to read blob"),
                    ::testing::HasSubstr("Hash file corruption detected")
                )
            );
            break;
        case CorruptionType::MapBlobDataCorrupted:
        case CorruptionType::DeletedBlobDataCorrupted:
            EXPECT_THAT(
                error,
                ::testing::AnyOf(
                    ::testing::HasSubstr("Hash file corruption detected"),
                    ::testing::HasSubstr("Error reading cache file")
                )
            );
            break;
        case CorruptionType::HeaderLatestSeqCorrupted:
            EXPECT_THAT(error, ::testing::HasSubstr("Hash file corruption detected"));
            break;
    }
}
// Suite alias for non-parametrized edge-case tests.
struct LedgerCacheFileEdgeCaseTest : LedgerCacheFileTestBase {};
// Both write() and read() must fail cleanly when the path cannot be opened.
TEST_F(LedgerCacheFileEdgeCaseTest, NonExistingFile)
{
    LedgerCacheFile invalidPathFile("/invalid/path/file.cache");
    auto testData = createTestData(1, 1, 10);
    auto dataView = toDataView(testData);

    auto writeResult = invalidPathFile.write(dataView);
    // ASSERT (not EXPECT): error() below is undefined behavior if the
    // expected unexpectedly holds a value, so the test must not continue.
    ASSERT_FALSE(writeResult.has_value());
    EXPECT_THAT(writeResult.error(), ::testing::HasSubstr("Couldn't open file"));

    auto readResult = invalidPathFile.read(0);
    ASSERT_FALSE(readResult.has_value());
    EXPECT_THAT(readResult.error(), ::testing::HasSubstr("Couldn't open file"));
}
// The largest representable sequence number survives a write/read cycle.
TEST_F(LedgerCacheFileEdgeCaseTest, MaxSequenceNumber)
{
    LedgerCacheFile cacheFile(tmpFile.path);
    auto data = createTestData(1, 1, 10);
    data.latestSeq = std::numeric_limits<uint32_t>::max();

    auto view = toDataView(data);
    auto writeResult = cacheFile.write(view);
    ASSERT_TRUE(writeResult.has_value());

    auto readBack = cacheFile.read(0);
    ASSERT_TRUE(readBack.has_value());
    verifyDataEquals(data, readBack.value());
}
// Entries whose blobs are empty must round-trip without error.
TEST_F(LedgerCacheFileEdgeCaseTest, ZeroSizedBlobs)
{
    LedgerCacheFile cacheFile(tmpFile.path);
    auto data = createTestData(3, 2, 0);  // blobSize == 0
    auto view = toDataView(data);

    auto writeResult = cacheFile.write(view);
    ASSERT_TRUE(writeResult.has_value());

    auto readBack = cacheFile.read(0);
    ASSERT_TRUE(readBack.has_value());
    verifyDataEquals(data, readBack.value());
}
// Extreme key bit patterns (all zeros, all ones, alternating 0xAA/0x55)
// must round-trip unchanged.
TEST_F(LedgerCacheFileEdgeCaseTest, SpecialKeyPatterns)
{
    LedgerCacheFile cacheFile(tmpFile.path);
    LedgerCacheFile::Data testData;
    testData.latestSeq = 100;

    // All-zero key.
    ripple::uint256 zeroKey;
    std::memset(zeroKey.data(), 0, ripple::uint256::size());
    testData.map.emplace(zeroKey, data::LedgerCache::CacheEntry{.seq = 1, .blob = {1, 2, 3}});

    // All-ones key.
    ripple::uint256 onesKey;
    std::memset(onesKey.data(), 0xFF, ripple::uint256::size());
    testData.map.emplace(onesKey, data::LedgerCache::CacheEntry{.seq = 2, .blob = {4, 5, 6}});

    // Alternating-bit key goes into the deleted set.
    ripple::uint256 altKey;
    for (size_t i = 0; i < ripple::uint256::size(); ++i) {
        altKey.data()[i] = static_cast<unsigned char>(((i % 2) != 0u) ? 0xAA : 0x55);
    }
    testData.deleted.emplace(altKey, data::LedgerCache::CacheEntry{.seq = 3, .blob = {7, 8, 9}});

    auto dataView = toDataView(testData);
    auto writeResult = cacheFile.write(dataView);
    ASSERT_TRUE(writeResult.has_value());

    auto readResult = cacheFile.read(0);
    ASSERT_TRUE(readResult.has_value());
    verifyDataEquals(testData, readResult.value());
}
// A 1 MiB blob per entry must round-trip without truncation.
TEST_F(LedgerCacheFileEdgeCaseTest, LargeBlobs)
{
    LedgerCacheFile cacheFile(tmpFile.path);
    auto data = createTestData(1, 1, 1024 * 1024);
    auto view = toDataView(data);

    auto writeResult = cacheFile.write(view);
    ASSERT_TRUE(writeResult.has_value());

    auto readBack = cacheFile.read(0);
    ASSERT_TRUE(readBack.has_value());
    verifyDataEquals(data, readBack.value());
}
// Boundary per-entry sequence values (0, max, max/2) must round-trip.
TEST_F(LedgerCacheFileEdgeCaseTest, SequenceNumber)
{
    LedgerCacheFile cacheFile(tmpFile.path);
    LedgerCacheFile::Data data;
    data.latestSeq = 0;

    ripple::uint256 firstKey;
    ripple::uint256 secondKey;
    ripple::uint256 thirdKey;
    std::memset(firstKey.data(), 1, ripple::uint256::size());
    std::memset(secondKey.data(), 2, ripple::uint256::size());
    std::memset(thirdKey.data(), 3, ripple::uint256::size());

    data.map.emplace(firstKey, data::LedgerCache::CacheEntry{.seq = 0, .blob = {1}});
    data.map.emplace(
        secondKey, data::LedgerCache::CacheEntry{.seq = std::numeric_limits<uint32_t>::max(), .blob = {2}}
    );
    data.deleted.emplace(
        thirdKey, data::LedgerCache::CacheEntry{.seq = std::numeric_limits<uint32_t>::max() / 2, .blob = {3}}
    );

    auto view = toDataView(data);
    auto writeResult = cacheFile.write(view);
    ASSERT_TRUE(writeResult.has_value());

    auto readBack = cacheFile.read(0);
    ASSERT_TRUE(readBack.has_value());
    verifyDataEquals(data, readBack.value());
}
// A file with only live map entries (no deleted set) must round-trip.
TEST_F(LedgerCacheFileEdgeCaseTest, OnlyMapEntries)
{
    LedgerCacheFile cacheFile(tmpFile.path);
    auto data = createTestData(5, 0, 100);  // no deleted entries
    auto view = toDataView(data);

    auto writeResult = cacheFile.write(view);
    ASSERT_TRUE(writeResult.has_value());

    auto readBack = cacheFile.read(0);
    ASSERT_TRUE(readBack.has_value());
    verifyDataEquals(data, readBack.value());
}
// A file with only deleted entries (empty live map) must round-trip.
TEST_F(LedgerCacheFileEdgeCaseTest, OnlyDeletedEntries)
{
    LedgerCacheFile cacheFile(tmpFile.path);
    auto data = createTestData(0, 5, 100);  // no live map entries
    auto view = toDataView(data);

    auto writeResult = cacheFile.write(view);
    ASSERT_TRUE(writeResult.has_value());

    auto readBack = cacheFile.read(0);
    ASSERT_TRUE(readBack.has_value());
    verifyDataEquals(data, readBack.value());
}
// write() first writes to "<path>.new" and then renames. When the rename
// fails (destination is a directory), the ".new" file must be left behind
// and the error must mention it.
TEST_F(LedgerCacheFileEdgeCaseTest, WriteCreatesFileWithSuffixNew)
{
    // The test causes failure of rename operation by creating destination as directory
    std::filesystem::remove(tmpFile.path);
    std::filesystem::create_directory(tmpFile.path);

    LedgerCacheFile cacheFile(tmpFile.path);
    auto testData = createTestData(1, 1, 10);
    auto dataView = toDataView(testData);

    auto writeResult = cacheFile.write(dataView);
    // ASSERT (not EXPECT): error() below is undefined behavior if the write
    // unexpectedly succeeded, so the test must not continue in that case.
    ASSERT_FALSE(writeResult.has_value());

    auto newFilePath = fmt::format("{}.new", tmpFile.path);
    EXPECT_THAT(writeResult.error(), ::testing::HasSubstr(newFilePath));
    EXPECT_TRUE(std::filesystem::exists(newFilePath));
    EXPECT_TRUE(std::filesystem::is_regular_file(newFilePath));
}
// Parameters for the minimum-sequence validation tests below.
struct LedgerCacheFileMinSequenceValidationParams {
    uint32_t latestSeq;     // sequence written into the cache file
    uint32_t minLatestSeq;  // minimum sequence required when reading back
    bool shouldSucceed;     // whether read() is expected to succeed
    std::string testName;   // human-readable name used by the name generator
};
// Parameterized fixture for validating the minimum-sequence check on read.
struct LedgerCacheFileMinSequenceValidationTest
    : LedgerCacheFileTestBase,
      ::testing::WithParamInterface<LedgerCacheFileMinSequenceValidationParams> {};
// Covers the boundary conditions of the minimum-sequence check:
// min < latest, min > latest, min == latest, and both zero.
INSTANTIATE_TEST_SUITE_P(
    LedgerCacheFileMinSequenceValidationTests,
    LedgerCacheFileMinSequenceValidationTest,
    ::testing::Values(
        LedgerCacheFileMinSequenceValidationParams{
            .latestSeq = 1000u,
            .minLatestSeq = 500u,
            .shouldSucceed = true,
            .testName = "accept_when_min_less_than_latest"
        },
        LedgerCacheFileMinSequenceValidationParams{
            .latestSeq = 1000u,
            .minLatestSeq = 2000u,
            .shouldSucceed = false,
            .testName = "reject_when_min_greater_than_latest"
        },
        LedgerCacheFileMinSequenceValidationParams{
            .latestSeq = 1000u,
            .minLatestSeq = 1000u,
            .shouldSucceed = true,
            .testName = "accept_when_min_equals_latest"
        },
        LedgerCacheFileMinSequenceValidationParams{
            .latestSeq = 0u,
            .minLatestSeq = 0u,
            .shouldSucceed = true,
            .testName = "accept_zero_sequence"
        }
    ),
    tests::util::kNAME_GENERATOR
);
// Writes a file with params.latestSeq, then reads it back with
// params.minLatestSeq and checks the validation outcome.
TEST_P(LedgerCacheFileMinSequenceValidationTest, ValidateMinSequence)
{
    auto const params = GetParam();

    LedgerCacheFile cacheFile(tmpFile.path);
    auto testData = createTestData(3, 2, 100);
    testData.latestSeq = params.latestSeq;

    auto dataView = toDataView(testData);
    auto writeResult = cacheFile.write(dataView);
    ASSERT_TRUE(writeResult.has_value());

    auto readResult = cacheFile.read(params.minLatestSeq);
    if (params.shouldSucceed) {
        ASSERT_TRUE(readResult.has_value()) << "Expected read to succeed but got error: " << readResult.error();
        EXPECT_EQ(readResult.value().latestSeq, params.latestSeq);
    } else {
        // ASSERT (not EXPECT): calling error() below is UB if a value is present.
        ASSERT_FALSE(readResult.has_value()) << "Expected read to fail but it succeeded";
        EXPECT_THAT(readResult.error(), ::testing::HasSubstr("too low"));
    }
}

View File

@@ -0,0 +1,169 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include "data/impl/OutputFile.hpp"
#include "util/Shasum.hpp"
#include "util/TmpFile.hpp"

#include <gtest/gtest.h>
#include <xrpl/basics/base_uint.h>

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <fstream>
#include <ios>
#include <iterator>
#include <numbers>
#include <string>
#include <vector>
using namespace data::impl;
// Fixture providing a fresh temporary file and a helper to read it back.
struct OutputFileTest : ::testing::Test {
    TmpFile tmpFile = TmpFile::empty();

    // Reads the whole temporary file back as a binary string.
    std::string
    readFileContents() const
    {
        std::ifstream input(tmpFile.path, std::ios::binary);
        std::string contents{std::istreambuf_iterator<char>{input}, std::istreambuf_iterator<char>{}};
        return contents;
    }
};
TEST_F(OutputFileTest, ConstructorOpensFile)
{
    // Opening an existing, writable path succeeds.
    OutputFile out(tmpFile.path);
    EXPECT_TRUE(out.isOpen());
}
TEST_F(OutputFileTest, NonExistingFile)
{
    // A path inside a directory that does not exist cannot be opened.
    std::string const missingDirPath = "/invalid/nonexistent/directory/file.dat";
    OutputFile out(missingDirPath);
    EXPECT_FALSE(out.isOpen());
}
// Verifies that write(T) emits the exact in-memory representation of
// fundamental types, back to back with no padding.
TEST_F(OutputFileTest, WriteBasicTypes)
{
    uint32_t const intValue = 0x12345678;
    double const doubleValue = std::numbers::pi;
    char const charValue = 'A';
    {
        OutputFile file(tmpFile.path);
        file.write(intValue);
        file.write(doubleValue);
        file.write(charValue);
    }
    std::string contents = readFileContents();
    // ASSERT: the reads below would run past the buffer if the size is wrong.
    ASSERT_EQ(contents.size(), sizeof(intValue) + sizeof(doubleValue) + sizeof(charValue));

    // memcpy instead of reinterpret_cast: the string buffer is not guaranteed
    // to be suitably aligned for uint32_t/double, so casting and dereferencing
    // would be undefined behavior.
    uint32_t readInt{};
    double readDouble{};
    std::memcpy(&readInt, contents.data(), sizeof(readInt));
    std::memcpy(&readDouble, contents.data() + sizeof(intValue), sizeof(readDouble));
    EXPECT_EQ(readInt, intValue);
    EXPECT_EQ(readDouble, doubleValue);
    EXPECT_EQ(contents[sizeof(intValue) + sizeof(doubleValue)], charValue);
}
// Verifies that the pointer/size overload writes a contiguous array verbatim.
TEST_F(OutputFileTest, WriteArray)
{
    std::vector<uint32_t> const data = {0x11111111, 0x22222222, 0x33333333, 0x44444444};
    {
        OutputFile file(tmpFile.path);
        file.write(data.data(), data.size() * sizeof(uint32_t));
    }
    std::string contents = readFileContents();
    // ASSERT: the memcpy below relies on this size being correct.
    ASSERT_EQ(contents.size(), data.size() * sizeof(uint32_t));

    // memcpy into a properly-typed vector instead of reinterpret_cast: the
    // string buffer may be misaligned for uint32_t, making the cast UB.
    std::vector<uint32_t> readData(data.size());
    std::memcpy(readData.data(), contents.data(), contents.size());
    EXPECT_EQ(readData, data);
}
TEST_F(OutputFileTest, WriteRawData)
{
    // Raw bytes must land in the file exactly as written.
    std::string const payload = "Hello, World!";
    {
        OutputFile out(tmpFile.path);
        out.writeRaw(payload.data(), payload.size());
    }
    EXPECT_EQ(readFileContents(), payload);
}
TEST_F(OutputFileTest, WriteMultipleChunks)
{
    // Consecutive raw writes are appended in order with no separators.
    std::vector<std::string> const chunks = {"First chunk", "Second chunk", "Third chunk"};
    {
        OutputFile out(tmpFile.path);
        for (auto const& chunk : chunks)
            out.writeRaw(chunk.data(), chunk.size());
    }

    std::string expected;
    for (auto const& chunk : chunks)
        expected += chunk;
    EXPECT_EQ(readFileContents(), expected);
}
TEST_F(OutputFileTest, HashOfEmptyFile)
{
    // With nothing written, the running hash equals SHA-256 of the empty string.
    OutputFile out(tmpFile.path);
    ASSERT_TRUE(out.isOpen());
    EXPECT_EQ(out.hash(), util::sha256sum(""));
}
TEST_F(OutputFileTest, HashAfterWriting)
{
    std::string const payload = "Hello, World!";
    OutputFile out(tmpFile.path);
    out.writeRaw(payload.data(), payload.size());
    // The running hash reflects exactly the bytes written so far.
    EXPECT_EQ(out.hash(), util::sha256sum(payload));
}
TEST_F(OutputFileTest, HashProgressesWithWrites)
{
    std::string const first = "Hello, ";
    std::string const second = "World!";

    OutputFile out(tmpFile.path);
    ASSERT_TRUE(out.isOpen());

    // The hash is updated incrementally after each chunk is written.
    EXPECT_EQ(out.hash(), util::sha256sum(""));

    out.writeRaw(first.data(), first.size());
    EXPECT_EQ(out.hash(), util::sha256sum(first));

    out.writeRaw(second.data(), second.size());
    EXPECT_EQ(out.hash(), util::sha256sum(first + second));
}

View File

@@ -25,6 +25,8 @@
#include <boost/json/parse.hpp>
#include <boost/json/value.hpp>
#include <fmt/core.h>
#include <fmt/format.h>
#include <gtest/gtest.h>
namespace json = boost::json;
@@ -42,7 +44,9 @@ generateDefaultCacheConfig()
{"cache.num_cursors_from_diff", ConfigValue{ConfigType::Integer}.defaultValue(0)},
{"cache.num_cursors_from_account", ConfigValue{ConfigType::Integer}.defaultValue(0)},
{"cache.page_fetch_size", ConfigValue{ConfigType::Integer}.defaultValue(512)},
{"cache.load", ConfigValue{ConfigType::String}.defaultValue("async")}}
{"cache.load", ConfigValue{ConfigType::String}.defaultValue("async")},
{"cache.file.path", ConfigValue{ConfigType::String}.optional()},
{"cache.file.max_sequence_age", ConfigValue{ConfigType::Integer}.defaultValue(5000)}}
};
}
@@ -134,3 +138,34 @@ TEST_F(CacheLoaderSettingsTest, NoLoadStyleCorrectlyPropagatedThroughConfig)
EXPECT_TRUE(settings.isDisabled());
}
}
TEST_F(CacheLoaderSettingsTest, CacheFilePathCorrectlyPropagatedThroughConfig)
{
    static constexpr auto kCACHE_FILE_PATH = "/path/to/cache.dat";

    // A configured cache.file.path must surface in the parsed settings.
    auto const configJson = fmt::format(R"JSON({{"cache": {{"file": {{"path": "{}"}}}}}})JSON", kCACHE_FILE_PATH);
    auto const settings = makeCacheLoaderSettings(getParseCacheConfig(json::parse(configJson)));

    ASSERT_TRUE(settings.cacheFileSettings.has_value());
    EXPECT_EQ(settings.cacheFileSettings->path, kCACHE_FILE_PATH);
}
TEST_F(CacheLoaderSettingsTest, CacheFilePathNotSetWhenAbsentFromConfig)
{
    // Without cache.file.path in the config, no file settings are produced.
    auto const defaultConfig = generateDefaultCacheConfig();
    auto const settings = makeCacheLoaderSettings(defaultConfig);
    EXPECT_FALSE(settings.cacheFileSettings.has_value());
}
// Renamed from "PropagatedThoughConfig" to fix the typo and match the
// "...PropagatedThroughConfig" naming of the sibling tests.
TEST_F(CacheLoaderSettingsTest, MaxSequenceLagPropagatedThroughConfig)
{
    // cache.file.max_sequence_age must be propagated into maxAge.
    auto const seq = 1234;
    auto const jsonStr =
        fmt::format(R"JSON({{"cache": {{"file": {{"path": "doesnt_matter", "max_sequence_age": {} }}}}}})JSON", seq);
    auto const cfg = getParseCacheConfig(json::parse(jsonStr));
    auto const settings = makeCacheLoaderSettings(cfg);
    ASSERT_TRUE(settings.cacheFileSettings.has_value());
    EXPECT_EQ(settings.cacheFileSettings->maxAge, seq);
}

View File

@@ -33,9 +33,13 @@
#include <boost/json/parse.hpp>
#include <boost/json/value.hpp>
#include <fmt/format.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <cstdint>
#include <optional>
#include <string>
#include <vector>
namespace json = boost::json;
@@ -57,7 +61,9 @@ generateDefaultCacheConfig()
{"cache.num_cursors_from_diff", ConfigValue{ConfigType::Integer}.defaultValue(0)},
{"cache.num_cursors_from_account", ConfigValue{ConfigType::Integer}.defaultValue(0)},
{"cache.page_fetch_size", ConfigValue{ConfigType::Integer}.defaultValue(512)},
{"cache.load", ConfigValue{ConfigType::String}.defaultValue("async")}}
{"cache.load", ConfigValue{ConfigType::String}.defaultValue("async")},
{"cache.file.path", ConfigValue{ConfigType::String}.optional()},
{"cache.file.max_sequence_age", ConfigValue{ConfigType::Integer}.defaultValue(10)}}
};
}
@@ -90,18 +96,90 @@ INSTANTIATE_TEST_CASE_P(
CacheLoaderTest,
ParametrizedCacheLoaderTest,
Values(
Settings{.numCacheDiffs = 32, .numCacheMarkers = 48, .cachePageFetchSize = 512, .numThreads = 2},
Settings{.numCacheDiffs = 32, .numCacheMarkers = 48, .cachePageFetchSize = 512, .numThreads = 4},
Settings{.numCacheDiffs = 32, .numCacheMarkers = 48, .cachePageFetchSize = 512, .numThreads = 8},
Settings{.numCacheDiffs = 32, .numCacheMarkers = 48, .cachePageFetchSize = 512, .numThreads = 16},
Settings{.numCacheDiffs = 32, .numCacheMarkers = 128, .cachePageFetchSize = 24, .numThreads = 2},
Settings{.numCacheDiffs = 32, .numCacheMarkers = 64, .cachePageFetchSize = 48, .numThreads = 4},
Settings{.numCacheDiffs = 32, .numCacheMarkers = 48, .cachePageFetchSize = 64, .numThreads = 8},
Settings{.numCacheDiffs = 32, .numCacheMarkers = 24, .cachePageFetchSize = 128, .numThreads = 16},
Settings{.numCacheDiffs = 128, .numCacheMarkers = 128, .cachePageFetchSize = 24, .numThreads = 2},
Settings{.numCacheDiffs = 1024, .numCacheMarkers = 64, .cachePageFetchSize = 48, .numThreads = 4},
Settings{.numCacheDiffs = 512, .numCacheMarkers = 48, .cachePageFetchSize = 64, .numThreads = 8},
Settings{.numCacheDiffs = 64, .numCacheMarkers = 24, .cachePageFetchSize = 128, .numThreads = 16}
Settings{
.numCacheDiffs = 32,
.numCacheMarkers = 48,
.cachePageFetchSize = 512,
.numThreads = 2,
.cacheFileSettings = std::nullopt,
},
Settings{
.numCacheDiffs = 32,
.numCacheMarkers = 48,
.cachePageFetchSize = 512,
.numThreads = 4,
.cacheFileSettings = std::nullopt,
},
Settings{
.numCacheDiffs = 32,
.numCacheMarkers = 48,
.cachePageFetchSize = 512,
.numThreads = 8,
.cacheFileSettings = std::nullopt,
},
Settings{
.numCacheDiffs = 32,
.numCacheMarkers = 48,
.cachePageFetchSize = 512,
.numThreads = 16,
.cacheFileSettings = std::nullopt,
},
Settings{
.numCacheDiffs = 32,
.numCacheMarkers = 128,
.cachePageFetchSize = 24,
.numThreads = 2,
.cacheFileSettings = std::nullopt,
},
Settings{
.numCacheDiffs = 32,
.numCacheMarkers = 64,
.cachePageFetchSize = 48,
.numThreads = 4,
.cacheFileSettings = std::nullopt
},
Settings{
.numCacheDiffs = 32,
.numCacheMarkers = 48,
.cachePageFetchSize = 64,
.numThreads = 8,
.cacheFileSettings = std::nullopt
},
Settings{
.numCacheDiffs = 32,
.numCacheMarkers = 24,
.cachePageFetchSize = 128,
.numThreads = 16,
.cacheFileSettings = std::nullopt
},
Settings{
.numCacheDiffs = 128,
.numCacheMarkers = 128,
.cachePageFetchSize = 24,
.numThreads = 2,
.cacheFileSettings = std::nullopt
},
Settings{
.numCacheDiffs = 1024,
.numCacheMarkers = 64,
.cachePageFetchSize = 48,
.numThreads = 4,
.cacheFileSettings = std::nullopt
},
Settings{
.numCacheDiffs = 512,
.numCacheMarkers = 48,
.cachePageFetchSize = 64,
.numThreads = 8,
.cacheFileSettings = std::nullopt
},
Settings{
.numCacheDiffs = 64,
.numCacheMarkers = 24,
.cachePageFetchSize = 128,
.numThreads = 16,
.cacheFileSettings = std::nullopt
}
),
[](auto const& info) {
auto const settings = info.param;
@@ -285,3 +363,102 @@ TEST_F(CacheLoaderTest, DisabledCacheLoaderCanCallStopAndWait)
EXPECT_NO_THROW(loader.stop());
EXPECT_NO_THROW(loader.wait());
}
// Fixture for tests exercising loading the ledger cache from a file.
// Note: member order matters — `filePath` and `maxSequenceLag` are used to
// build `cfg`, which in turn is used to construct `loader`.
struct CacheLoaderFromFileTest : CacheLoaderTest {
    CacheLoaderFromFileTest()
    {
        backend_->setRange(kSEQ - 20, kSEQ);
    }

    std::string const filePath = "./cache.bin";
    uint32_t const maxSequenceLag = 10;

    // Sync load with a cache file configured, so load() tries the file first.
    ClioConfigDefinition const cfg = getParseCacheConfig(
        json::parse(
            fmt::format(
                R"JSON({{"cache": {{"load": "sync", "file": {{"path": "{}", "max_sequence_age": {}}}}}}})JSON",
                filePath,
                maxSequenceLag
            )
        )
    );

    CacheLoader<> loader{cfg, backend_, cache};
};
// When loading from the file succeeds, the backend's ledger range is extended
// up to the sequence that the cache reports after loading.
TEST_F(CacheLoaderFromFileTest, Success)
{
    constexpr uint32_t kLOADED_SEQ = 12345;

    EXPECT_CALL(cache, isFull).WillOnce(Return(false));
    // Minimum acceptable sequence is the current sequence minus the allowed lag.
    EXPECT_CALL(cache, loadFromFile(filePath, kSEQ - maxSequenceLag))
        .WillOnce(Return(std::expected<void, std::string>{}));
    EXPECT_CALL(cache, latestLedgerSequence).WillOnce(Return(kLOADED_SEQ));

    loader.load(kSEQ);

    // The range's max is updated to the sequence restored from the file.
    std::optional<LedgerRange> const expectedLedgerRange =
        LedgerRange{.minSequence = kSEQ - 20, .maxSequence = kLOADED_SEQ};
    EXPECT_EQ(backend_->fetchLedgerRange(), expectedLedgerRange);
}
// If loading from the file fails, the loader falls back to the regular
// cursor-based load from the database.
TEST_F(CacheLoaderFromFileTest, FailureBackToNormalLoad)
{
    auto const diffs = diffProvider.getLatestDiff();
    auto const loops = diffs.size() + 1;
    auto const keysSize = 14;

    EXPECT_CALL(cache, loadFromFile(filePath, kSEQ - maxSequenceLag))
        .WillOnce(Return(std::expected<void, std::string>(std::unexpected("File not found"))));

    // The expectations below describe the normal (non-file) load path.
    EXPECT_CALL(*backend_, fetchLedgerDiff(_, _)).Times(32).WillRepeatedly(Return(diffs));
    EXPECT_CALL(*backend_, doFetchSuccessorKey).Times(keysSize * loops).WillRepeatedly([this]() {
        return diffProvider.nextKey(keysSize);
    });
    EXPECT_CALL(*backend_, doFetchLedgerObjects(_, kSEQ, _))
        .Times(loops)
        .WillRepeatedly(Return(std::vector<Blob>{keysSize - 1, Blob{'s'}}));
    EXPECT_CALL(cache, isDisabled).WillRepeatedly(Return(false));
    EXPECT_CALL(cache, updateImp).Times(loops);
    EXPECT_CALL(cache, isFull).WillOnce(Return(false)).WillRepeatedly(Return(true));
    EXPECT_CALL(cache, setFull).Times(1);

    loader.load(kSEQ);
}
// With cache loading set to "none", the file is never touched and the cache
// is simply marked as disabled.
TEST_F(CacheLoaderFromFileTest, DontLoadWhenCacheIsDisabled)
{
    auto const disabledCacheCfg =
        getParseCacheConfig(json::parse(R"JSON({"cache": {"load": "none", "file": {"path": "/tmp/cache.bin"}}})JSON"));
    CacheLoader loaderWithCacheDisabled{disabledCacheCfg, backend_, cache};

    EXPECT_CALL(cache, isFull).WillOnce(Return(false));
    EXPECT_CALL(cache, setDisabled);

    loaderWithCacheDisabled.load(kSEQ);
}
// The minimum acceptable file sequence passed to loadFromFile is
// currentSequence - maxSequenceLag.
TEST_F(CacheLoaderFromFileTest, MaxSequenceLagCalculation)
{
    constexpr uint32_t kLOADED_SEQ = 12345;

    EXPECT_CALL(cache, isFull).WillOnce(Return(false));
    EXPECT_CALL(cache, loadFromFile(filePath, kSEQ - maxSequenceLag))
        .WillOnce(Return(std::expected<void, std::string>{}));
    EXPECT_CALL(cache, latestLedgerSequence).WillOnce(Return(kLOADED_SEQ));

    loader.load(kSEQ);
}
// If currentSeq - maxSequenceLag would fall below the backend's minimum
// ledger sequence, the minimum of the range is used instead.
TEST_F(CacheLoaderFromFileTest, MaxSequenceLagClampedToMinOfLedgerRange)
{
    uint32_t const currentSeq = 110;
    uint32_t const minSeq = currentSeq - maxSequenceLag + 10;  // above currentSeq - maxSequenceLag
    backend_->setRange(minSeq, currentSeq, true);

    EXPECT_CALL(cache, isFull).WillOnce(Return(false));
    // Clamped: minSeq is passed instead of currentSeq - maxSequenceLag.
    EXPECT_CALL(cache, loadFromFile(filePath, minSeq)).WillOnce(Return(std::expected<void, std::string>{}));
    EXPECT_CALL(cache, latestLedgerSequence).WillOnce(Return(minSeq + 1));

    loader.load(currentSeq);
}

View File

@@ -0,0 +1,356 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2025, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include "data/DBHelpers.hpp"
#include "data/Types.hpp"
#include "etl/SystemState.hpp"
#include "util/AsioContextTestFixture.hpp"
#include "util/MockBackendTestFixture.hpp"
#include "util/MockPrometheus.hpp"
#include "util/MockSubscriptionManager.hpp"
#include "util/TestObject.hpp"
#include "util/config/ConfigDefinition.hpp"
#include <etlng/impl/LedgerPublisher.hpp>
#include <fmt/format.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <xrpl/basics/chrono.h>
#include <xrpl/protocol/Indexes.h>
#include <xrpl/protocol/LedgerHeader.h>
#include <chrono>
#include <optional>
#include <vector>
using namespace testing;
using namespace etlng;
using namespace data;
using namespace std::chrono;
namespace {

// Test accounts, ledger identity, and amounts shared by the tests below.
constexpr auto kACCOUNT = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn";
constexpr auto kACCOUNT2 = "rLEsXccBGNR3UPuPu2hUXPjziKC3qKSBun";
constexpr auto kLEDGER_HASH = "4BC50C9B0D8515D3EAAE1E74B29A95804346C491EE1A95BF25E4AAB854A6A652";
constexpr auto kSEQ = 30;
// Ledger age in seconds; deliberately above the publisher's max-age cutoff
// (600) so tests using it exercise the "too old to publish" path.
constexpr auto kAGE = 800;
constexpr auto kAMOUNT = 100;
constexpr auto kFEE = 3;
constexpr auto kFINAL_BALANCE = 110;
constexpr auto kFINAL_BALANCE2 = 30;

// Matches a ledger header by sequence, hash, and close time only.
MATCHER_P(ledgerHeaderMatcher, expectedHeader, "Headers match")
{
    return arg.seq == expectedHeader.seq && arg.hash == expectedHeader.hash &&
        arg.closeTime == expectedHeader.closeTime;
}

}  // namespace
// Fixture combining a prometheus environment, a strict mock backend, a
// synchronous asio context, and a strict mock subscription manager.
struct ETLLedgerPublisherNgTest : util::prometheus::WithPrometheus, MockBackendTestStrict, SyncAsioContextTest {
    util::config::ClioConfigDefinition cfg{{}};
    StrictMockSubscriptionManagerSharedPtr mockSubscriptionManagerPtr;
};
// Publishing a header older than the max age records the sequence but skips
// all fetching and subscription notifications.
TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderSkipDueToAge)
{
    // Use kAGE (800) which is > MAX_LEDGER_AGE_SECONDS (600) to test skipping
    auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, kAGE);
    auto dummyState = etl::SystemState{};

    auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
    backend_->setRange(kSEQ - 1, kSEQ);
    publisher.publish(dummyLedgerHeader);

    // Verify last published sequence is set immediately
    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);

    // Since age > MAX_LEDGER_AGE_SECONDS, these should not be called
    EXPECT_CALL(*backend_, doFetchLedgerObject).Times(0);
    EXPECT_CALL(*backend_, fetchAllTransactionsInLedger).Times(0);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger).Times(0);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges).Times(0);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction).Times(0);

    // The deferred publish work runs on the asio context.
    ctx_.run();
}
// A fresh ledger header is fully published: fees and transactions are fetched
// and the ledger/book-changes streams are notified.
TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderWithinAgeLimit)
{
    // Use age 0 which is < MAX_LEDGER_AGE_SECONDS to ensure publishing happens
    auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, 0);
    auto dummyState = etl::SystemState{};

    auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
    backend_->setRange(kSEQ - 1, kSEQ);
    publisher.publish(dummyLedgerHeader);

    // Verify last published sequence is set immediately
    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);

    EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::fees().key, kSEQ, _))
        .WillOnce(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));
    EXPECT_CALL(*backend_, fetchAllTransactionsInLedger(kSEQ, _))
        .WillOnce(Return(std::vector<TransactionAndMetadata>{}));

    // No transactions in the ledger, so txn count in pubLedger is 0.
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger(_, _, fmt::format("{}-{}", kSEQ - 1, kSEQ), 0));
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges);

    ctx_.run();
    EXPECT_TRUE(publisher.lastPublishAgeSeconds() <= 1);
}
// When this node is the writer and the ledger is too old (kAGE), publish()
// still records the sequence, but the backend range remains unset.
TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderIsWritingTrue)
{
    auto dummyState = etl::SystemState{};
    dummyState.isWriting = true;

    auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, kAGE);
    auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);

    publisher.publish(dummyLedgerHeader);
    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);

    ctx_.run();
    EXPECT_FALSE(backend_->fetchLedgerRange());
}
// A fresh (age 0) ledger in the backend range is fully published, including
// its single transaction.
TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderInRange)
{
    auto dummyState = etl::SystemState{};
    dummyState.isWriting = true;

    auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, 0);  // age is 0
    auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
    backend_->setRange(kSEQ - 1, kSEQ);
    publisher.publish(dummyLedgerHeader);

    EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::fees().key, kSEQ, _))
        .WillOnce(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));

    // One payment transaction with its metadata for this ledger.
    TransactionAndMetadata t1;
    t1.transaction =
        createPaymentTransactionObject(kACCOUNT, kACCOUNT2, kAMOUNT, kFEE, kSEQ).getSerializer().peekData();
    t1.metadata = createPaymentTransactionMetaObject(kACCOUNT, kACCOUNT2, kFINAL_BALANCE, kFINAL_BALANCE2)
                      .getSerializer()
                      .peekData();
    t1.ledgerSequence = kSEQ;

    EXPECT_CALL(*backend_, fetchAllTransactionsInLedger).WillOnce(Return(std::vector<TransactionAndMetadata>{t1}));

    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);

    EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger(_, _, fmt::format("{}-{}", kSEQ - 1, kSEQ), 1));
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges);
    // mock 1 transaction
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction);

    ctx_.run();
    EXPECT_TRUE(publisher.lastPublishAgeSeconds() <= 1);
}
// A close time slightly in the future must not prevent publishing.
TEST_F(ETLLedgerPublisherNgTest, PublishLedgerHeaderCloseTimeGreaterThanNow)
{
    auto dummyState = etl::SystemState{};
    dummyState.isWriting = true;

    auto dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, 0);
    // Shift close time 10 seconds into the future (relative to ripple epoch).
    auto const nowPlus10 = system_clock::now() + seconds(10);
    auto const closeTime = duration_cast<seconds>(nowPlus10.time_since_epoch()).count() - kRIPPLE_EPOCH_START;
    dummyLedgerHeader.closeTime = ripple::NetClock::time_point{seconds{closeTime}};
    backend_->setRange(kSEQ - 1, kSEQ);

    auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
    publisher.publish(dummyLedgerHeader);

    EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::fees().key, kSEQ, _))
        .WillOnce(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));

    // One payment transaction with its metadata for the published ledger.
    TransactionAndMetadata t1;
    t1.transaction =
        createPaymentTransactionObject(kACCOUNT, kACCOUNT2, kAMOUNT, kFEE, kSEQ).getSerializer().peekData();
    t1.metadata = createPaymentTransactionMetaObject(kACCOUNT, kACCOUNT2, kFINAL_BALANCE, kFINAL_BALANCE2)
                      .getSerializer()
                      .peekData();
    t1.ledgerSequence = kSEQ;

    EXPECT_CALL(*backend_, fetchAllTransactionsInLedger(kSEQ, _))
        .WillOnce(Return(std::vector<TransactionAndMetadata>{t1}));

    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);

    EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger(_, _, fmt::format("{}-{}", kSEQ - 1, kSEQ), 1));
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction);

    ctx_.run();
    EXPECT_TRUE(publisher.lastPublishAgeSeconds() <= 1);
}
TEST_F(ETLLedgerPublisherNgTest, PublishLedgerSeqStopIsTrue)
{
    // A stopping system must refuse to publish by sequence.
    etl::SystemState stoppingState;
    stoppingState.isStopping = true;

    auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, stoppingState);
    EXPECT_FALSE(publisher.publish(kSEQ, {}));
}
// publish(seq, maxAttempts) gives up after maxAttempts if the requested
// sequence never appears in the backend's range.
TEST_F(ETLLedgerPublisherNgTest, PublishLedgerSeqMaxAttempt)
{
    auto dummyState = etl::SystemState{};
    dummyState.isStopping = false;

    auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);

    static constexpr auto kMAX_ATTEMPT = 2;

    // Range stays one behind the requested sequence, so every attempt fails.
    LedgerRange const range{.minSequence = kSEQ - 1, .maxSequence = kSEQ - 1};
    EXPECT_CALL(*backend_, hardFetchLedgerRange).Times(kMAX_ATTEMPT).WillRepeatedly(Return(range));
    EXPECT_FALSE(publisher.publish(kSEQ, kMAX_ATTEMPT, std::chrono::milliseconds{1}));
}
// When the requested sequence is available in the backend range,
// publish-by-sequence fetches the header and succeeds.
TEST_F(ETLLedgerPublisherNgTest, PublishLedgerSeqStopIsFalse)
{
    auto dummyState = etl::SystemState{};
    dummyState.isStopping = false;

    auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);

    LedgerRange const range{.minSequence = kSEQ, .maxSequence = kSEQ};
    EXPECT_CALL(*backend_, hardFetchLedgerRange).WillOnce(Return(range));
    auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, kAGE);
    EXPECT_CALL(*backend_, fetchLedgerBySequence(kSEQ, _)).WillOnce(Return(dummyLedgerHeader));
    EXPECT_TRUE(publisher.publish(kSEQ, {}));
    ctx_.run();
}
// Transactions must be published in ascending metadata-index order: t2
// (index 1) before t1 (index 2), regardless of the order they are supplied.
TEST_F(ETLLedgerPublisherNgTest, PublishMultipleTxInOrder)
{
    auto dummyState = etl::SystemState{};
    dummyState.isWriting = true;

    auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, 0);  // age is 0
    auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
    backend_->setRange(kSEQ - 1, kSEQ);
    publisher.publish(dummyLedgerHeader);

    EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::fees().key, kSEQ, _))
        .WillOnce(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));

    // t1 index > t2 index
    TransactionAndMetadata t1;
    t1.transaction =
        createPaymentTransactionObject(kACCOUNT, kACCOUNT2, kAMOUNT, kFEE, kSEQ).getSerializer().peekData();
    t1.metadata = createPaymentTransactionMetaObject(kACCOUNT, kACCOUNT2, kFINAL_BALANCE, kFINAL_BALANCE2, 2)
                      .getSerializer()
                      .peekData();
    t1.ledgerSequence = kSEQ;
    t1.date = 1;

    TransactionAndMetadata t2;
    t2.transaction =
        createPaymentTransactionObject(kACCOUNT, kACCOUNT2, kAMOUNT, kFEE, kSEQ).getSerializer().peekData();
    t2.metadata = createPaymentTransactionMetaObject(kACCOUNT, kACCOUNT2, kFINAL_BALANCE, kFINAL_BALANCE2, 1)
                      .getSerializer()
                      .peekData();
    t2.ledgerSequence = kSEQ;
    t2.date = 2;

    EXPECT_CALL(*backend_, fetchAllTransactionsInLedger(kSEQ, _))
        .WillOnce(Return(std::vector<TransactionAndMetadata>{t1, t2}));

    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);

    EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger(_, _, fmt::format("{}-{}", kSEQ - 1, kSEQ), 2));
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges);

    // Expect t2 (lower metadata index) to be published before t1.
    Sequence const s;
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction(t2, _)).InSequence(s);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction(t1, _)).InSequence(s);

    ctx_.run();
    EXPECT_TRUE(publisher.lastPublishAgeSeconds() <= 1);
}
// A ledger older than the publisher's max age must not be published to any
// subscription stream, even when this node is the writer.
TEST_F(ETLLedgerPublisherNgTest, PublishVeryOldLedgerShouldSkip)
{
    auto dummyState = etl::SystemState{};
    dummyState.isWriting = true;

    // Use kAGE (800), which is greater than MAX_LEDGER_AGE_SECONDS (600),
    // instead of repeating the magic number inline.
    auto const dummyLedgerHeader = createLedgerHeader(kLEDGER_HASH, kSEQ, kAGE);
    auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
    backend_->setRange(kSEQ - 1, kSEQ);
    publisher.publish(dummyLedgerHeader);

    EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger).Times(0);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges).Times(0);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubTransaction).Times(0);

    // The sequence is still recorded even though publishing is skipped.
    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ);
    ctx_.run();
}
// Two ledgers published back to back must be delivered to subscribers in
// publish order.
TEST_F(ETLLedgerPublisherNgTest, PublishMultipleLedgersInQuickSuccession)
{
    auto dummyState = etl::SystemState{};
    dummyState.isWriting = true;

    auto const dummyLedgerHeader1 = createLedgerHeader(kLEDGER_HASH, kSEQ, 0);
    auto const dummyLedgerHeader2 = createLedgerHeader(kLEDGER_HASH, kSEQ + 1, 0);
    auto publisher = etlng::impl::LedgerPublisher(ctx_, backend_, mockSubscriptionManagerPtr, dummyState);
    backend_->setRange(kSEQ - 1, kSEQ + 1);

    // Publish two ledgers in quick succession
    publisher.publish(dummyLedgerHeader1);
    publisher.publish(dummyLedgerHeader2);

    EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::fees().key, kSEQ, _))
        .WillOnce(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));
    EXPECT_CALL(*backend_, doFetchLedgerObject(ripple::keylet::fees().key, kSEQ + 1, _))
        .WillOnce(Return(createLegacyFeeSettingBlob(1, 2, 3, 4, 0)));
    EXPECT_CALL(*backend_, fetchAllTransactionsInLedger(kSEQ, _))
        .WillOnce(Return(std::vector<TransactionAndMetadata>{}));
    EXPECT_CALL(*backend_, fetchAllTransactionsInLedger(kSEQ + 1, _))
        .WillOnce(Return(std::vector<TransactionAndMetadata>{}));

    // Ledger 1's notifications must come before ledger 2's.
    Sequence const s;
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger(ledgerHeaderMatcher(dummyLedgerHeader1), _, _, _)).InSequence(s);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges(ledgerHeaderMatcher(dummyLedgerHeader1), _)).InSequence(s);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubLedger(ledgerHeaderMatcher(dummyLedgerHeader2), _, _, _)).InSequence(s);
    EXPECT_CALL(*mockSubscriptionManagerPtr, pubBookChanges(ledgerHeaderMatcher(dummyLedgerHeader2), _)).InSequence(s);

    EXPECT_TRUE(publisher.getLastPublishedSequence());
    EXPECT_EQ(publisher.getLastPublishedSequence().value(), kSEQ + 1);
    ctx_.run();
}

View File

@@ -22,6 +22,10 @@
#include <gtest/gtest.h>
#include <xrpl/basics/base_uint.h>
#include <cstdint>
#include <string>
#include <utility>
using namespace util;
struct ShasumTest : testing::Test {
@@ -45,3 +49,60 @@ TEST_F(ShasumTest, sha256sumString)
EXPECT_EQ(sha256sumString(""), kEMPTY_HASH);
EXPECT_EQ(sha256sumString("hello world"), kHELLO_WORLD_HASH);
}
TEST_F(ShasumTest, Sha256sumStreamingEmpty)
{
    // Finalizing a hasher without any updates yields the SHA-256 of "".
    ripple::uint256 expectedDigest;
    ASSERT_TRUE(expectedDigest.parseHex(kEMPTY_HASH));

    Sha256sum emptyHasher;
    EXPECT_EQ(std::move(emptyHasher).finalize(), expectedDigest);
}
TEST_F(ShasumTest, Sha256sumStreamingSingleUpdate)
{
    // A single update with the full payload yields the payload's SHA-256.
    std::string const payload = "hello world";
    ripple::uint256 expectedDigest;
    ASSERT_TRUE(expectedDigest.parseHex(kHELLO_WORLD_HASH));

    Sha256sum hasher;
    hasher.update(payload.data(), payload.size());
    EXPECT_EQ(std::move(hasher).finalize(), expectedDigest);
}
TEST_F(ShasumTest, Sha256sumStreamingMultipleUpdates)
{
    // Splitting "hello world" across several updates produces the same digest
    // as hashing it in one piece.
    std::string const payload = "hello world";
    Sha256sum hasher;
    hasher.update(payload.data(), 5);      // "hello"
    hasher.update(payload.data() + 5, 1);  // " "
    hasher.update(payload.data() + 6, 5);  // "world"

    ripple::uint256 expectedDigest;
    ASSERT_TRUE(expectedDigest.parseHex(kHELLO_WORLD_HASH));
    EXPECT_EQ(std::move(hasher).finalize(), expectedDigest);
}
TEST_F(ShasumTest, Sha256sumUpdateTemplate)
{
    // The typed update<T> overload must hash the object's bytes exactly like
    // the raw pointer/size overload.
    uint32_t narrow = 0x12345678;
    uint64_t wide = 0x123456789ABCDEF0;

    Sha256sum typedHasher;
    typedHasher.update(narrow);
    typedHasher.update(wide);
    auto const typedDigest = std::move(typedHasher).finalize();

    Sha256sum rawHasher;
    rawHasher.update(&narrow, sizeof(narrow));
    rawHasher.update(&wide, sizeof(wide));
    auto const rawDigest = std::move(rawHasher).finalize();

    EXPECT_EQ(typedDigest, rawDigest);
}