Compare commits


5 Commits

Author  SHA1  Message  Date
Bronek Kozicki  ba2ac69f55  Add hardcoded hash test back  2025-07-14 17:32:15 +01:00
Bronek Kozicki  242b11f314  For discussion  2025-07-14 11:03:20 +01:00
Bronek Kozicki  6af70476e8  Merge branch 'develop' into Bronek/maximum_feature_name_size  2025-07-14 10:38:39 +01:00
Bronek Kozicki  4152dc53ba  Shrink to 31 bytes, enforced in compilation  2025-07-11 15:00:39 +01:00
Bronek Kozicki  eb95da9cd3  Enforce maximum feature name size  2025-07-11 14:24:31 +01:00
6 changed files with 123 additions and 16 deletions

View File

@@ -84,6 +84,12 @@
 namespace ripple {
+// We do not want feature names to exceed this size.
+static constexpr std::size_t maxFeatureNameSize = 63;
+// We do not want feature names of this length (or this length + 1), to enable
+// the use of a 32-byte string for selecting a feature as a uint256, in WASM.
+static constexpr std::size_t reservedFeatureNameSize = 32;
 enum class VoteBehavior : int { Obsolete = -1, DefaultNo = 0, DefaultYes };
 enum class AmendmentSupport : int { Retired = -1, Supported = 0, Unsupported };
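
For context on why exactly 32 bytes is reserved, here is a hypothetical sketch (not code from this change set): once no feature name can be 32 bytes long, a 32-byte string can be interpreted unambiguously as a raw uint256 value, while every other length must be a name to hash. The helper selectFeature and its namespace are made up for illustration, and the snippet assumes the two constants introduced above.

// Illustrative only: a 32-byte input is taken verbatim as a uint256, while any
// other length is treated as a feature name and hashed the same way the test
// further down does (sha512Half over the name bytes).
#include <xrpl/basics/Slice.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/digest.h>

#include <string>

namespace example {

ripple::uint256
selectFeature(std::string const& s)
{
    if (s.size() == ripple::reservedFeatureNameSize)
        return ripple::uint256::fromVoid(s.data());  // raw 256-bit value
    return ripple::sha512Half(ripple::Slice(s.data(), s.size()));  // by name
}

}  // namespace example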

View File

@@ -443,10 +443,22 @@ featureToName(uint256 const& f)
 #pragma push_macro("XRPL_ABANDON")
 #undef XRPL_ABANDON
+template <std::size_t N>
+constexpr auto
+enforceMaxFeatureNameSize(char const (&n)[N]) -> char const*
+{
+    static_assert(N != reservedFeatureNameSize);
+    static_assert(N != reservedFeatureNameSize + 1);
+    static_assert(N <= maxFeatureNameSize);
+    return n;
+}
 #define XRPL_FEATURE(name, supported, vote) \
-    uint256 const feature##name = registerFeature(#name, supported, vote);
-#define XRPL_FIX(name, supported, vote) \
-    uint256 const fix##name = registerFeature("fix" #name, supported, vote);
+    uint256 const feature##name = \
+        registerFeature(enforceMaxFeatureNameSize(#name), supported, vote);
+#define XRPL_FIX(name, supported, vote) \
+    uint256 const fix##name = registerFeature( \
+        enforceMaxFeatureNameSize("fix" #name), supported, vote);
 // clang-format off
 #define XRPL_RETIRE(name) \
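
A self-contained sketch of how the compile-time check above behaves; the two constants are duplicated locally so the snippet builds on its own. Since N is the size of the string literal including its terminating NUL, names of exactly 31 or 32 characters, or longer than 62 characters, are rejected at compile time.

// Standalone illustration (constants copied from Feature.h so this compiles
// by itself); not part of the change set.
#include <cstddef>

static constexpr std::size_t maxFeatureNameSize = 63;
static constexpr std::size_t reservedFeatureNameSize = 32;

template <std::size_t N>
constexpr auto
enforceMaxFeatureNameSize(char const (&n)[N]) -> char const*
{
    static_assert(N != reservedFeatureNameSize);      // 31-character name + NUL
    static_assert(N != reservedFeatureNameSize + 1);  // 32-character name + NUL
    static_assert(N <= maxFeatureNameSize);           // at most 62 chars + NUL
    return n;
}

// Compiles: "MultiSignReserve" has 16 characters, so N == 17.
[[maybe_unused]] constexpr auto ok =
    enforceMaxFeatureNameSize("MultiSignReserve");

// Any name of exactly 31 or 32 characters, or of more than 62 characters,
// would trip one of the static_asserts above and fail to compile.

int
main()
{
}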

View File

@@ -22,6 +22,7 @@
 #include <xrpld/app/misc/AmendmentTable.h>
 #include <xrpl/protocol/Feature.h>
+#include <xrpl/protocol/digest.h>
 #include <xrpl/protocol/jss.h>
 namespace ripple {
@@ -202,16 +203,20 @@ class Feature_test : public beast::unit_test::suite
         using namespace test::jtx;
         Env env{*this};
-        auto jrr = env.rpc("feature", "MultiSignReserve")[jss::result];
+        std::string const name = "MultiSignReserve";
+        auto jrr = env.rpc("feature", name)[jss::result];
         BEAST_EXPECTS(jrr[jss::status] == jss::success, "status");
         jrr.removeMember(jss::status);
         BEAST_EXPECT(jrr.size() == 1);
-        BEAST_EXPECT(
-            jrr.isMember("586480873651E106F1D6339B0C4A8945BA705A777F3F4524626FF"
-                         "1FC07EFE41D"));
+        auto const expected =
+            to_string(sha512Half(Slice(name.data(), name.size())));
+        char const sha[] =
+            "586480873651E106F1D6339B0C4A8945BA705A777F3F4524626FF1FC07EFE41D";
+        BEAST_EXPECT(expected == sha);
+        BEAST_EXPECT(jrr.isMember(expected));
         auto feature = *(jrr.begin());
-        BEAST_EXPECTS(feature[jss::name] == "MultiSignReserve", "name");
+        BEAST_EXPECTS(feature[jss::name] == name, "name");
         BEAST_EXPECTS(!feature[jss::enabled].asBool(), "enabled");
         BEAST_EXPECTS(
             feature[jss::vetoed].isBool() && !feature[jss::vetoed].asBool(),

View File

@@ -104,7 +104,7 @@ Workers::setNumberOfThreads(int numberOfThreads)
             ++m_pauseCount;
             // Pausing a thread counts as one "internal task"
-            m_semaphore.release();
+            m_semaphore.notify();
         }
     }
@@ -128,7 +128,7 @@ Workers::stop()
 void
 Workers::addTask()
 {
-    m_semaphore.release();
+    m_semaphore.notify();
 }
 int
@@ -213,7 +213,7 @@ Workers::Worker::run()
             // Acquire a task or "internal task."
             //
-            m_workers.m_semaphore.acquire();
+            m_workers.m_semaphore.wait();
             // See if there's a pause request. This
             // counts as an "internal task."
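
For reference, a minimal sketch (not part of this diff) of how the two APIs line up: std::counting_semaphore is the C++20 type being swapped out above, and ripple::semaphore is the class added in the new header at the end of this compare. The include path follows the one used in Workers.h.

// Illustrative mapping only: release()/acquire() on the standard semaphore
// correspond to notify()/wait() on the hand-rolled one.
#include <xrpld/core/detail/semaphore.h>

#include <semaphore>

void
semaphoreApiMapping()
{
    std::counting_semaphore<> oldStyle{0};
    oldStyle.release();  // post one pending task
    oldStyle.acquire();  // block until a task is available

    ripple::semaphore newStyle{0};
    newStyle.notify();  // post one pending task
    newStyle.wait();    // block until a task is available
}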

View File

@@ -20,12 +20,13 @@
 #ifndef RIPPLE_CORE_WORKERS_H_INCLUDED
 #define RIPPLE_CORE_WORKERS_H_INCLUDED
+#include <xrpld/core/detail/semaphore.h>
 #include <xrpl/beast/core/LockFreeStack.h>
 #include <atomic>
 #include <condition_variable>
 #include <mutex>
 #include <semaphore>
 #include <string>
 #include <thread>
@@ -222,10 +223,10 @@ private:
     std::condition_variable m_cv;  // signaled when all threads paused
     std::mutex m_mut;
     bool m_allPaused;
-    std::counting_semaphore<> m_semaphore;  // each pending task is 1 resource
+    semaphore m_semaphore;  // each pending task is 1 resource
     int m_numberOfThreads;           // how many we want active now
     std::atomic<int> m_activeCount;  // to know when all are paused
     std::atomic<int> m_pauseCount;   // how many threads need to pause now
     std::atomic<int>
         m_runningTaskCount;  // how many calls to processTask() active
     beast::LockFreeStack<Worker> m_everyone;  // holds all created workers

View File

@@ -0,0 +1,83 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_CORE_SEMAPHORE_H_INCLUDED
#define RIPPLE_CORE_SEMAPHORE_H_INCLUDED

#include <condition_variable>
#include <mutex>

namespace ripple {

template <class Mutex, class CondVar>
class basic_semaphore
{
private:
    Mutex m_mutex;
    CondVar m_cond;
    std::size_t m_count;

public:
    using size_type = std::size_t;

    /** Create the semaphore, with an optional initial count.
        If unspecified, the initial count is zero.
    */
    explicit basic_semaphore(size_type count = 0) : m_count(count)
    {
    }

    /** Increment the count and unblock one waiting thread. */
    void
    notify()
    {
        std::lock_guard lock{m_mutex};
        ++m_count;
        m_cond.notify_one();
    }

    /** Block until notify is called. */
    void
    wait()
    {
        std::unique_lock lock{m_mutex};
        while (m_count == 0)
            m_cond.wait(lock);
        --m_count;
    }

    /** Perform a non-blocking wait.
        @return `true` if the wait would be satisfied.
    */
    bool
    try_wait()
    {
        std::lock_guard lock{m_mutex};
        if (m_count == 0)
            return false;
        --m_count;
        return true;
    }
};

using semaphore = basic_semaphore<std::mutex, std::condition_variable>;

} // namespace ripple

#endif
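
A hypothetical usage sketch of the semaphore above, mirroring how Workers uses it: each call to notify() posts one pending task, and a worker thread blocks in wait() until a task (or an internal pause request) has been posted. The include path follows the one used in Workers.h.

// Illustrative only: one worker thread consuming tasks posted via notify().
#include <xrpld/core/detail/semaphore.h>

#include <iostream>
#include <thread>

int
main()
{
    ripple::semaphore pendingTasks{0};

    std::thread worker([&pendingTasks] {
        for (int i = 0; i < 3; ++i)
        {
            pendingTasks.wait();  // blocks until a task has been posted
            std::cout << "processed task " << i << '\n';
        }
    });

    for (int i = 0; i < 3; ++i)
        pendingTasks.notify();  // each pending task is one count

    worker.join();
}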