refactor: Update to Boost 1.88 (#5570)

This updates Boost to 1.88. Clio wants to move to 1.88 because it fixes several ASan false positives around coroutine usage, and for Clio to move to a newer Boost, libXRPL needs to move too; hence the changes in this PR. A lot has changed between 1.83 and 1.88, so the diff is large, especially around Boost.Asio and coroutines.
Alex Kremer
2025-08-27 10:34:50 +01:00
committed by GitHub
parent 808c86663c
commit 1506e65558
78 changed files with 871 additions and 516 deletions
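
The mechanical pattern repeated across most of the changed files is the removal of deprecated Asio APIs: io_service becomes io_context, the post() member function becomes the free function boost::asio::post, and expires_from_now() becomes expires_after(). A minimal standalone sketch of the after-state (illustrative only, not code from this repo):

#include <boost/asio/io_context.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/steady_timer.hpp>

#include <chrono>
#include <iostream>

int main()
{
    // io_service is a deprecated alias that recent Asio releases drop; use io_context.
    boost::asio::io_context ctx;

    // The post() member function is gone; use the free function boost::asio::post.
    boost::asio::post(ctx, [] { std::cout << "posted handler ran\n"; });

    // expires_from_now() is deprecated; expires_after() is the replacement.
    boost::asio::steady_timer timer(ctx);
    timer.expires_after(std::chrono::milliseconds(10));
    timer.async_wait([](boost::system::error_code const& ec) {
        if (!ec)
            std::cout << "timer fired\n";
    });

    ctx.run();
}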


@@ -23,7 +23,8 @@
#include <xrpl/beast/utility/instrumentation.h>
#include <boost/asio/basic_waitable_timer.hpp>
-#include <boost/asio/io_service.hpp>
+#include <boost/asio/io_context.hpp>
+#include <boost/asio/post.hpp>
#include <chrono>
#include <condition_variable>
@@ -32,7 +33,7 @@
namespace beast {
-/** Measures handler latency on an io_service queue. */
+/** Measures handler latency on an io_context queue. */
template <class Clock>
class io_latency_probe
{
@@ -44,12 +45,12 @@ private:
std::condition_variable_any m_cond;
std::size_t m_count;
duration const m_period;
-boost::asio::io_service& m_ios;
+boost::asio::io_context& m_ios;
boost::asio::basic_waitable_timer<std::chrono::steady_clock> m_timer;
bool m_cancel;
public:
-io_latency_probe(duration const& period, boost::asio::io_service& ios)
+io_latency_probe(duration const& period, boost::asio::io_context& ios)
: m_count(1)
, m_period(period)
, m_ios(ios)
@@ -64,16 +65,16 @@ public:
cancel(lock, true);
}
-/** Return the io_service associated with the latency probe. */
+/** Return the io_context associated with the latency probe. */
/** @{ */
-boost::asio::io_service&
-get_io_service()
+boost::asio::io_context&
+get_io_context()
{
return m_ios;
}
-boost::asio::io_service const&
-get_io_service() const
+boost::asio::io_context const&
+get_io_context() const
{
return m_ios;
}
@@ -109,8 +110,10 @@ public:
std::lock_guard lock(m_mutex);
if (m_cancel)
throw std::logic_error("io_latency_probe is canceled");
-m_ios.post(sample_op<Handler>(
-    std::forward<Handler>(handler), Clock::now(), false, this));
+boost::asio::post(
+    m_ios,
+    sample_op<Handler>(
+        std::forward<Handler>(handler), Clock::now(), false, this));
}
/** Initiate continuous i/o latency sampling.
@@ -124,8 +127,10 @@ public:
std::lock_guard lock(m_mutex);
if (m_cancel)
throw std::logic_error("io_latency_probe is canceled");
-m_ios.post(sample_op<Handler>(
-    std::forward<Handler>(handler), Clock::now(), true, this));
+boost::asio::post(
+    m_ios,
+    sample_op<Handler>(
+        std::forward<Handler>(handler), Clock::now(), true, this));
}
private:
@@ -236,12 +241,13 @@ private:
// The latency is too high to maintain the desired
// period so don't bother with a timer.
//
-m_probe->m_ios.post(
+boost::asio::post(
+    m_probe->m_ios,
sample_op<Handler>(m_handler, now, m_repeat, m_probe));
}
else
{
-m_probe->m_timer.expires_from_now(when - now);
+m_probe->m_timer.expires_after(when - now);
m_probe->m_timer.async_wait(
sample_op<Handler>(m_handler, now, m_repeat, m_probe));
}
@@ -254,7 +260,8 @@ private:
if (!m_probe)
return;
typename Clock::time_point const now(Clock::now());
-m_probe->m_ios.post(
+boost::asio::post(
+    m_probe->m_ios,
sample_op<Handler>(m_handler, now, m_repeat, m_probe));
}
};
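
On the coroutine side the description mentions, 1.88-era Asio code converges on the awaitable/co_spawn style rather than older stackful spawn overloads. A hedged sketch of that style (not taken from this diff; wait_a_bit is an illustrative name):

#include <boost/asio/awaitable.hpp>
#include <boost/asio/co_spawn.hpp>
#include <boost/asio/detached.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/this_coro.hpp>
#include <boost/asio/use_awaitable.hpp>

#include <chrono>

boost::asio::awaitable<void>
wait_a_bit()
{
    // Run a timer on the executor this coroutine was spawned on.
    auto executor = co_await boost::asio::this_coro::executor;
    boost::asio::steady_timer timer(executor);
    timer.expires_after(std::chrono::milliseconds(10));
    co_await timer.async_wait(boost::asio::use_awaitable);
}

int main()
{
    boost::asio::io_context ctx;
    boost::asio::co_spawn(ctx, wait_a_bit(), boost::asio::detached);
    ctx.run();
}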