Implement cursor for book_offers

Nathan Nichols
2021-04-26 15:35:29 -05:00
parent 0bcf3a4601
commit 3c8ecc01f1
2 changed files with 32 additions and 12 deletions


@@ -266,8 +266,11 @@ doBookOffers(
     ripple::uint256 bookBase = getBookBase(book);
     auto start = std::chrono::system_clock::now();
+    std::cout << "getting Book Offers" << std::endl;
     auto [offers, retCursor] =
         backend.fetchBookOffers(bookBase, *ledgerSequence, limit, cursor);
+    std::cout << "got Book Offers" << std::endl;
     auto end = std::chrono::system_clock::now();
     BOOST_LOG_TRIVIAL(warning) << "Time loading books from Postgres: "
@@ -281,7 +284,7 @@ doBookOffers(
         std::move_iterator(offers.begin()),
         std::move_iterator(offers.end()),
         std::back_inserter(jsonOffers),
-        [](auto obj) {
+        [](auto&& obj) {
             try
             {
                 ripple::SerialIter it{obj.blob.data(), obj.blob.size()};
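Note (not part of the diff): the retCursor returned above would typically be echoed back to the client so the next book_offers request can resume where this page ended. A minimal sketch of that wiring, assuming a boost::json-style response object and a hypothetical "cursor" field name, neither of which is confirmed by this commit:

    // Sketch only: surface the paging cursor to the client so it can be
    // passed back as the `cursor` argument of the next call.
    // `response` and the "cursor" field name are assumptions.
    if (retCursor)
        response["cursor"] = ripple::strHex(*retCursor);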


@@ -666,14 +666,26 @@ CassandraBackend::fetchBookOffers(
     BOOST_LOG_TRIVIAL(info) << __func__ << " upper = " << std::to_string(upper)
                             << " book = " << ripple::strHex(std::string((char*)book.data(), 24));
-    // ripple::uint256 zero = {};
-    // statement.bindBytes(zero.data(), 8);
-    // if (cursor)
-    //     statement.bindBytes(*cursor);
-    // else
-    // {
-    //     statement.bindBytes(zero);
-    // }
+    if (cursor)
+    {
+        auto object = fetchLedgerObject(*cursor, sequence);
+        if (!object)
+            return {{}, {}};
+        ripple::SerialIter it{object->data(), object->size()};
+        ripple::SLE offer{it, *cursor};
+        ripple::uint256 bookDir = offer.getFieldH256(ripple::sfBookDirectory);
+        statement.bindBytes(bookDir.data() + 24, 8);
+        statement.bindBytes(*cursor);
+    }
+    else
+    {
+        ripple::uint256 zero = beast::zero;
+        statement.bindBytes(zero.data(), 8);
+        statement.bindBytes(zero);
+    }
     // statement.bindUInt(limit);
     CassandraResult result = executeSyncRead(statement);
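The new branch resolves the cursor into a resume point: the cursor is the key of the first offer not yet returned, so the code loads that offer at the requested sequence, reads its BookDirectory field, and binds the directory's final 8 bytes (the quality portion of the directory index) together with the cursor key against the quality_key clustering column. A self-contained sketch of that derivation, under the same assumptions; the helper name is hypothetical and the commit inlines this logic instead, and the header paths are the usual rippled locations and may differ per tree:

    #include <ripple/basics/Blob.h>
    #include <ripple/protocol/STLedgerEntry.h>
    #include <ripple/protocol/Serializer.h>

    #include <array>
    #include <cstdint>
    #include <cstring>
    #include <utility>

    // Hypothetical helper: derive the (quality bytes, key) resume pair from a
    // cursor key and the serialized offer it points at.
    std::pair<std::array<std::uint8_t, 8>, ripple::uint256>
    resumePointFromCursor(ripple::Blob const& offerBlob, ripple::uint256 const& cursorKey)
    {
        ripple::SerialIter it{offerBlob.data(), offerBlob.size()};
        ripple::SLE offer{it, cursorKey};

        // The BookDirectory index ends in 8 bytes encoding the offer's quality,
        // which is the leading component of the books table's quality_key.
        ripple::uint256 const bookDir = offer.getFieldH256(ripple::sfBookDirectory);
        std::array<std::uint8_t, 8> quality{};
        std::memcpy(quality.data(), bookDir.data() + 24, quality.size());
        return {quality, cursorKey};
    }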
@@ -681,6 +693,7 @@ CassandraBackend::fetchBookOffers(
     BOOST_LOG_TRIVIAL(debug) << __func__ << " - got keys";
     if (!result)
     {
+        std::cout << "could not sync read" << std::endl;
         return {{}, {}};
     }
@@ -694,6 +707,7 @@ CassandraBackend::fetchBookOffers(
     BOOST_LOG_TRIVIAL(debug)
         << __func__ << " - populated keys. num keys = " << keys.size();
+    std::cout << keys.size() << std::endl;
     if (!keys.size())
         return {{}, {}};
@@ -701,11 +715,13 @@ CassandraBackend::fetchBookOffers(
     std::vector<Blob> objs = fetchLedgerObjects(keys, sequence);
     for (size_t i = 0; i < objs.size(); ++i)
     {
-        if (results.size() == limit)
-            return {results, keys[i]};
         if (objs[i].size() != 0)
+        {
+            if (results.size() == limit)
+                return {results, keys[i]};
             results.push_back({keys[i], objs[i]});
+        }
     }

     return {results, {}};
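With the reordered check, a page that fills up to limit returns the key of the first offer that did not fit as the cursor, while an exhausted book returns an empty cursor. A sketch of a caller paging through a book under those assumptions; the signature matches the call site in the first hunk, the cursor is assumed to be a std::optional<ripple::uint256>, and process() is a hypothetical consumer:

    // Sketch only: page through an order book by feeding each returned
    // cursor back into the next call. An unset cursor means the book is
    // exhausted, as implied by the `return {results, {}}` above.
    std::optional<ripple::uint256> cursor;
    do
    {
        auto [offers, nextCursor] =
            backend.fetchBookOffers(bookBase, *ledgerSequence, limit, cursor);
        for (auto const& offer : offers)
            process(offer);  // hypothetical consumer of {key, blob} pairs
        cursor = nextCursor;
    } while (cursor);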
@@ -1634,6 +1650,7 @@ CassandraBackend::open()
query.str(""); query.str("");
query << "SELECT quality_key FROM " << tablePrefix << "books2 " query << "SELECT quality_key FROM " << tablePrefix << "books2 "
<< " WHERE book = ? AND sequence = ?" << " WHERE book = ? AND sequence = ?"
<< " AND quality_key >= (?, ?)"
" ORDER BY quality_key ASC"; " ORDER BY quality_key ASC";
if (!selectBook_.prepareStatement(query, session_.get())) if (!selectBook_.prepareStatement(query, session_.get()))
continue; continue;