Antithesis instrumentation improvements (#5213)

* Rename ASSERT to XRPL_ASSERT
* Upgrade to Antithesis SDK 0.4.4, and use new 0.4.4 features
  * automatic conversion of the condition to bool, like plain assert (sketched below)
* Add an instrumentation workflow to verify the build with instrumentation enabled
Author: Bronek Kozicki
Date: 2024-12-16 22:48:33 +00:00
Committed by: GitHub
Parent: ea1fffeebf
Commit: eabca8439f

223 changed files with 1452 additions and 1344 deletions
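
For orientation, a minimal sketch of the pattern this commit establishes. The macro name, message style, and call shape are taken from the hunks below; the fallback definition is an illustrative assumption, not rippled's actual instrumentation header, which forwards to the Antithesis SDK when instrumentation is enabled.

#include <cassert>

// Hypothetical non-instrumented fallback: a condition plus a message that
// names the enclosing function. With Antithesis SDK 0.4.4 the condition is
// contextually converted to bool, so pointers and integers can be passed
// without an explicit comparison, just as with plain assert.
#ifndef XRPL_ASSERT
#define XRPL_ASSERT(cond, msg) assert((cond) && (msg))
#endif

void
use(int* ptr)
{
    XRPL_ASSERT(ptr, "example::use : non-null input");  // no `!= nullptr`
}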

@@ -112,7 +112,7 @@ public:
     operator=(Slice s)
     {
         // Ensure the slice isn't a subset of the buffer.
-        ASSERT(
+        XRPL_ASSERT(
             s.size() == 0 || size_ == 0 || s.data() < p_.get() ||
                 s.data() >= p_.get() + size_,
             "ripple::Buffer::operator=(Slice) : input not a subset");

@@ -43,7 +43,7 @@ namespace ripple {
 constexpr std::size_t
 calculatePercent(std::size_t count, std::size_t total)
 {
-    assert(total != 0); // NOTE No ASSERT here, because constexpr
+    assert(total != 0); // NOTE No XRPL_ASSERT here, because constexpr
     return ((std::min(count, total) * 100) + total - 1) / total;
 }
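
The plain assert survives here because calculatePercent is constexpr. A sketch of the reasoning, assuming (as the NOTE implies) that XRPL_ASSERT expands to a runtime call into the Antithesis SDK and is therefore unusable in constant evaluation, while assert remains a valid constant expression whenever its condition holds:

#include <cassert>
#include <cstddef>

// assert is permitted inside a constexpr function (since C++14); when the
// condition is true, the call is a constant expression, so compile-time
// evaluation still succeeds.
constexpr std::size_t
half(std::size_t n)
{
    assert(n != 0);  // fine during constant evaluation when n != 0
    return n / 2;
}
static_assert(half(10) == 5, "constant evaluation succeeds");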

@@ -143,7 +143,7 @@ class SlabAllocator
     void
     deallocate(std::uint8_t* ptr) noexcept
     {
-        ASSERT(
+        XRPL_ASSERT(
             own(ptr),
             "ripple::SlabAllocator::SlabBlock::deallocate : own input");
@@ -188,7 +188,7 @@ public:
               boost::alignment::align_up(sizeof(Type) + extra, itemAlignment_))
         , slabSize_(alloc)
     {
-        ASSERT(
+        XRPL_ASSERT(
             (itemAlignment_ & (itemAlignment_ - 1)) == 0,
             "ripple::SlabAllocator::SlabAllocator : valid alignment");
     }
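
The alignment condition above is the usual power-of-two bit trick: x & (x - 1) clears the lowest set bit, so the expression is zero exactly when x has no more than one bit set. A quick illustration:

static_assert((8 & (8 - 1)) == 0, "8 is a power of two");
static_assert((12 & (12 - 1)) != 0, "12 is not");
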
@@ -300,8 +300,8 @@ public:
     bool
     deallocate(std::uint8_t* ptr) noexcept
     {
-        ASSERT(
-            ptr != nullptr,
+        XRPL_ASSERT(
+            ptr,
             "ripple::SlabAllocator::SlabAllocator::deallocate : non-null "
             "input");

@@ -103,7 +103,7 @@ public:
     std::uint8_t
     operator[](std::size_t i) const noexcept
     {
-        ASSERT(
+        XRPL_ASSERT(
             i < size_,
             "ripple::Slice::operator[](std::size_t) const : valid input");
         return data_[i];

@@ -291,7 +291,7 @@ public:
             std::is_trivially_copyable<typename Container::value_type>::value>>
     explicit base_uint(Container const& c)
     {
-        ASSERT(
+        XRPL_ASSERT(
             c.size() * sizeof(typename Container::value_type) == size(),
             "ripple::base_uint::base_uint(Container auto) : input size match");
         std::memcpy(data_.data(), c.data(), size());
@@ -304,7 +304,7 @@ public:
         base_uint&>
     operator=(Container const& c)
     {
-        ASSERT(
+        XRPL_ASSERT(
             c.size() * sizeof(typename Container::value_type) == size(),
             "ripple::base_uint::operator=(Container auto) : input size match");
         std::memcpy(data_.data(), c.data(), size());
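
Both base_uint hunks guard the same invariant: the container's byte count must equal the width of the fixed-size integer. A hypothetical usage sketch, assuming rippled's usual uint256 alias (256 bits, so size() is 32 bytes) and that its headers are available:

#include <array>
#include <cstdint>

// A 32-element array of bytes has 32 * sizeof(std::uint8_t) == 32 bytes,
// matching uint256::size(), so the assertion holds; a 16-byte container
// would trip it instead of letting memcpy read past the end.
std::array<std::uint8_t, 32> raw{};
ripple::uint256 id{raw};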

@@ -258,8 +258,8 @@ public:
             ? *partitions
             : std::thread::hardware_concurrency();
         map_.resize(partitions_);
-        ASSERT(
-            partitions_ != 0,
+        XRPL_ASSERT(
+            partitions_,
             "ripple::partitioned_unordered_map::partitioned_unordered_map : "
             "nonzero partitions");
     }

@@ -114,7 +114,7 @@ std::enable_if_t<
     Integral>
 rand_int(Engine& engine, Integral min, Integral max)
 {
-    ASSERT(max > min, "ripple::rand_int : max over min inputs");
+    XRPL_ASSERT(max > min, "ripple::rand_int : max over min inputs");
     // This should have no state and constructing it should
     // be very cheap. If that turns out not to be the case
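
A hypothetical call site, assuming a standard engine; the assertion rejects an inverted range before any distribution (the subject of the trailing comment) is constructed:

#include <random>

std::mt19937 engine{std::random_device{}()};
int const roll = ripple::rand_int(engine, 1, 6);  // max > min, assert holds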

@@ -235,7 +235,7 @@ public:
     explicit scope_unlock(std::unique_lock<Mutex>& lock) noexcept(true)
         : plock(&lock)
     {
-        ASSERT(
+        XRPL_ASSERT(
             plock->owns_lock(),
             "ripple::scope_unlock::scope_unlock : mutex must be locked");
         plock->unlock();
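
A hypothetical usage sketch of the pattern this guard supports, assuming the destructor re-locks the mutex (the natural counterpart of the unlock shown in this hunk, not visible here):

#include <mutex>

std::mutex m;

void
work()
{
    std::unique_lock<std::mutex> lock(m);
    // ... touch state guarded by m ...
    {
        ripple::scope_unlock unlocked(lock);  // asserts owns_lock(), unlocks
        // ... long-running work that must not hold m ...
    }  // lock re-acquired when `unlocked` is destroyed
    // ... guarded state is safe to touch again ...
}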

@@ -117,7 +117,7 @@ public:
     packed_spinlock(std::atomic<T>& lock, int index)
         : bits_(lock), mask_(static_cast<T>(1) << index)
     {
-        ASSERT(
+        XRPL_ASSERT(
             index >= 0 && (mask_ != 0),
             "ripple::packed_spinlock::packed_spinlock : valid index and mask");
     }
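
Finally, a hypothetical sketch of what packed_spinlock is for, assuming the usual lock()/unlock() interface (outside this hunk): many one-bit spinlocks packed into a single atomic word, with the constructor's assertion rejecting a negative index and, via the mask_ != 0 check, an index too wide for T.

#include <atomic>
#include <cstdint>

std::atomic<std::uint64_t> bits{0};

void
critical()
{
    ripple::packed_spinlock lk(bits, 3);  // guards bit 3 of `bits`
    lk.lock();
    // ... critical section protected by a single bit of the word ...
    lk.unlock();
}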