Performance logging and counters:

  * Tally and duration counters for Job Queue tasks and RPC calls,
    optionally rendered by server_info and server_state, and
    optionally written to a distinct log file.
    - Tally each Job Queue task as it is queued, starts, and
      finishes running. Track total duration queued and running.
    - Tally each RPC call as it starts and either finishes
      successfully or throws an exception. Track total running
      duration for each.
  * Track currently executing Job Queue tasks and RPC methods
    along with their durations.
  * Json-formatted performance log file written by a dedicated
    thread, carrying the data described above.
  * New optional parameter, "counters", for server_info and
    server_state. If set, render Job Queue and RPC call counters
    as well as currently executing tasks (example invocations
    follow this list).
  * New configuration section, "[perf]", to optionally control
    performance logging to a file.
  * Support optional sub-second precision when rendering
    human-readable time points.
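For orientation, the command-line forms below are the ones added to printHelp later in this diff; the JSON-RPC form is inferred from the parser and handler changes (the request simply carries a boolean "counters" field), so treat it as a sketch rather than documented syntax:

    rippled server_info counters
    rippled server_state counters

    JSON-RPC sketch:  { "method": "server_info", "params": [ { "counters": true } ] }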
Mark Travis
2018-01-13 04:02:43 -08:00
committed by Nikolaos D. Bougalis
parent ef3bc92b82
commit 8eb8c77886
45 changed files with 10379 additions and 577 deletions


@@ -930,6 +930,24 @@
# address=192.168.0.95:4201
# prefix=my_validator
#
# [perf]
#
# Configuration of performance logging. If enabled, write Json-formatted
# performance-oriented data periodically to a distinct log file.
#
# "perf_log"       A string specifying the pathname of the performance log
#                  file. A relative pathname will log relative to the
#                  configuration directory. Required to enable
#                  performance logging.
#
# "log_interval"   Integer value for number of seconds between writing
#                  to performance log. Default 1.
#
# Example:
# [perf]
#   perf_log=/var/log/rippled/perf.log
#   log_interval=2
#
#-------------------------------------------------------------------------------
#
# 7. Voting
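The [perf] stanza above is consumed by setup_PerfLog, added later in this diff. A minimal C++ sketch of that mapping, assuming config is the already-loaded Config object:

    // Sketch only; mirrors setup_PerfLog in PerfLogImp.cpp further down.
    auto const setup = perf::setup_PerfLog(config.section("perf"), config.CONFIG_DIR);
    // With the example section above:
    //   setup.perfLog     -> "/var/log/rippled/perf.log" (a relative path is
    //                        resolved against the configuration directory)
    //   setup.logInterval -> std::chrono::milliseconds{2000}  (log_interval=2 seconds)
    // Leaving perf_log unset keeps performance logging disabled.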


@@ -1,200 +0,0 @@
//
// Copyright (c) 2015-2016 Vinnie Falco (vinnie dot falco at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef BASIC_SECONDS_CLOCK_HPP
#define BASIC_SECONDS_CLOCK_HPP
#include "chrono_util.hpp"
#include <algorithm>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>
namespace detail {
class seconds_clock_worker
{
public:
virtual void sample() = 0;
};
//------------------------------------------------------------------------------
// Updates the clocks
class seconds_clock_thread
{
public:
using mutex = std::mutex;
using cond_var = std::condition_variable;
using lock_guard = std::lock_guard <mutex>;
using unique_lock = std::unique_lock <mutex>;
using clock_type = std::chrono::steady_clock;
using seconds = std::chrono::seconds;
using thread = std::thread;
using workers = std::vector <seconds_clock_worker*>;
bool stop_;
mutex m_;
cond_var cond_;
workers workers_;
thread thread_;
seconds_clock_thread()
: stop_(false)
{
thread_ = thread{
&seconds_clock_thread::run, this};
}
~seconds_clock_thread()
{
stop();
}
void add(seconds_clock_worker& w)
{
lock_guard lock{m_};
workers_.push_back(&w);
}
void remove(seconds_clock_worker& w)
{
lock_guard lock{m_};
workers_.erase(std::find(
workers_.begin(), workers_.end(), &w));
}
void stop()
{
if(thread_.joinable())
{
{
lock_guard lock{m_};
stop_ = true;
}
cond_.notify_all();
thread_.join();
}
}
void run()
{
unique_lock lock{m_};
for(;;)
{
for(auto iter : workers_)
iter->sample();
using namespace std::chrono;
clock_type::time_point const when(
floor <seconds>(
clock_type::now().time_since_epoch()) +
milliseconds(900));
if(cond_.wait_until(lock, when, [this]{ return stop_; }))
return;
}
}
static seconds_clock_thread& instance()
{
static seconds_clock_thread singleton;
return singleton;
}
};
} // detail
//------------------------------------------------------------------------------
/** Called before main exits to terminate the utility thread.
This is a workaround for Visual Studio 2013:
http://connect.microsoft.com/VisualStudio/feedback/details/786016/creating-a-global-c-object-that-used-thread-join-in-its-destructor-causes-a-lockup
http://stackoverflow.com/questions/10915233/stdthreadjoin-hangs-if-called-after-main-exits-when-using-vs2012-rc
*/
inline
void
basic_seconds_clock_main_hook()
{
#ifdef _MSC_VER
detail::seconds_clock_thread::instance().stop();
#endif
}
/** A clock whose minimum resolution is one second.
The purpose of this class is to optimize the performance of the now()
member function call. It uses a dedicated thread that wakes up at least
once per second to sample the requested trivial clock.
@tparam Clock A type meeting these requirements:
http://en.cppreference.com/w/cpp/concept/Clock
*/
template<class Clock>
class basic_seconds_clock
{
public:
using rep = typename Clock::rep;
using period = typename Clock::period;
using duration = typename Clock::duration;
using time_point = typename Clock::time_point;
static bool const is_steady = Clock::is_steady;
static time_point now()
{
// Make sure the thread is constructed before the
// worker otherwise we will crash during destruction
// of objects with static storage duration.
struct initializer
{
initializer()
{
detail::seconds_clock_thread::instance();
}
};
static initializer init;
struct worker : detail::seconds_clock_worker
{
time_point m_now;
std::mutex m_;
worker()
: m_now(Clock::now())
{
detail::seconds_clock_thread::instance().add(*this);
}
~worker()
{
detail::seconds_clock_thread::instance().remove(*this);
}
time_point now()
{
std::lock_guard<std::mutex> lock{m_};
return m_now;
}
void sample()
{
std::lock_guard<std::mutex> lock{m_};
m_now = Clock::now();
}
};
static worker w;
return w.now();
}
};
#endif


@@ -1,58 +0,0 @@
//
// Copyright (c) 2015-2016 Vinnie Falco (vinnie dot falco at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef CHRONO_UTIL_HPP
#define CHRONO_UTIL_HPP
#include <chrono>
// From Howard Hinnant
// http://home.roadrunner.com/~hinnant/duration_io/chrono_util.html
#if !defined(_MSC_FULL_VER) || (_MSC_FULL_VER <= 190023506)
// round down
template <class To, class Rep, class Period>
To floor(std::chrono::duration <Rep, Period> const& d)
{
To t = std::chrono::duration_cast<To>(d);
if (t > d)
--t;
return t;
}
// round to nearest, to even on tie
template <class To, class Rep, class Period>
To round (std::chrono::duration <Rep, Period> const& d)
{
To t0 = std::chrono::duration_cast<To>(d);
To t1 = t0;
++t1;
auto diff0 = d - t0;
auto diff1 = t1 - d;
if (diff0 == diff1)
{
if (t0.count() & 1)
return t1;
return t0;
}
else if (diff0 < diff1)
return t0;
return t1;
}
// round up
template <class To, class Rep, class Period>
To ceil (std::chrono::duration <Rep, Period> const& d)
{
To t = std::chrono::duration_cast<To>(d);
if (t < d)
++t;
return t;
}
#endif
#endif


@@ -20,6 +20,7 @@
#include <ripple/app/ledger/LedgerToJson.h>
#include <ripple/app/misc/TxQ.h>
#include <ripple/basics/base_uint.h>
#include <ripple/basics/date.h>
namespace ripple {


@@ -46,6 +46,7 @@
#include <ripple/app/tx/apply.h>
#include <ripple/basics/ResolverAsio.h>
#include <ripple/basics/Sustain.h>
#include <ripple/basics/PerfLog.h>
#include <ripple/json/json_reader.h>
#include <ripple/nodestore/DummyScheduler.h>
#include <ripple/overlay/Cluster.h>
@@ -269,7 +270,7 @@ private:
void operator() (Duration const& elapsed)
{
using namespace std::chrono;
auto const lastSample = ceil<milliseconds>(elapsed);
auto const lastSample = date::ceil<milliseconds>(elapsed);
lastSample_ = lastSample;
@@ -306,6 +307,7 @@ public:
std::unique_ptr<TimeKeeper> timeKeeper_;
beast::Journal m_journal;
std::unique_ptr<perf::PerfLog> perfLog_;
Application::MutexType m_masterMutex;
// Required by the SHAMapStore
@@ -399,6 +401,11 @@ public:
, m_journal (logs_->journal("Application"))
// PerfLog must be started before any other threads are launched.
, perfLog_ (perf::make_PerfLog(
perf::setup_PerfLog(config_->section("perf"), config_->CONFIG_DIR),
*this, logs_->journal("PerfLog"), [this] () { signalStop(); }))
, m_txMaster (*this)
, m_nodeStoreScheduler (*this)
@@ -425,7 +432,7 @@ public:
//
, m_jobQueue (std::make_unique<JobQueue>(
m_collectorManager->group ("jobq"), m_nodeStoreScheduler,
logs_->journal("JobQueue"), *logs_))
logs_->journal("JobQueue"), *logs_, *perfLog_))
//
// Anything which calls addJob must be a descendant of the JobQueue
@@ -488,7 +495,8 @@ public:
get_io_service (), *validators_, logs_->journal("ValidatorSite")))
, serverHandler_ (make_ServerHandler (*this, *m_networkOPs, get_io_service (),
*m_jobQueue, *m_networkOPs, *m_resourceManager, *m_collectorManager))
*m_jobQueue, *m_networkOPs, *m_resourceManager,
*m_collectorManager))
, mFeeTrack (std::make_unique<LoadFeeTrack>(logs_->journal("LoadManager")))
@@ -653,6 +661,11 @@ public:
return m_txMaster;
}
perf::PerfLog& getPerfLog () override
{
return *perfLog_;
}
NodeCache& getTempNodeCache () override
{
return m_tempNodeCache;


@@ -35,6 +35,7 @@ namespace ripple {
namespace unl { class Manager; }
namespace Resource { class Manager; }
namespace NodeStore { class Database; class DatabaseShard; }
namespace perf { class PerfLog; }
// VFALCO TODO Fix forward declares required for header dependency loops
class AmendmentTable;
@@ -152,6 +153,7 @@ public:
virtual NetworkOPs& getOPs () = 0;
virtual OrderBookDB& getOrderBookDB () = 0;
virtual TransactionMaster& getMasterTransaction () = 0;
virtual perf::PerfLog& getPerfLog () = 0;
virtual
std::pair<PublicKey, SecretKey> const&


@@ -166,7 +166,8 @@ void printHelp (const po::options_description& desc)
" ripple ...\n"
" ripple_path_find <json> [<ledger>]\n"
" version\n"
" server_info\n"
" server_info [counters]\n"
" server_state [counters]\n"
" sign <private_key> <tx_json> [offline]\n"
" sign_for <signer_address> <signer_private_key> <tx_json> [offline]\n"
" stop\n"


@@ -40,6 +40,7 @@
#include <ripple/app/misc/impl/AccountTxPaging.h>
#include <ripple/app/tx/apply.h>
#include <ripple/basics/mulDiv.h>
#include <ripple/basics/PerfLog.h>
#include <ripple/basics/UptimeTimer.h>
#include <ripple/core/ConfigSections.h>
#include <ripple/crypto/csprng.h>
@@ -357,7 +358,7 @@ public:
void consensusViewChange () override;
Json::Value getConsensusInfo () override;
Json::Value getServerInfo (bool human, bool admin) override;
Json::Value getServerInfo (bool human, bool admin, bool counters) override;
void clearLedgerFetch () override;
Json::Value getLedgerFetchInfo () override;
std::uint32_t acceptLedger (
@@ -2078,7 +2079,7 @@ Json::Value NetworkOPsImp::getConsensusInfo ()
return mConsensus.getJson (true);
}
Json::Value NetworkOPsImp::getServerInfo (bool human, bool admin)
Json::Value NetworkOPsImp::getServerInfo (bool human, bool admin, bool counters)
{
Json::Value info = Json::objectValue;
@@ -2090,6 +2091,9 @@ Json::Value NetworkOPsImp::getServerInfo (bool human, bool admin)
info [jss::server_state] = strOperatingMode ();
info [jss::time] = to_string(date::floor<std::chrono::microseconds>(
std::chrono::system_clock::now()));
if (needNetworkLedger_)
info[jss::network_ledger] = "waiting";
@@ -2136,6 +2140,12 @@ Json::Value NetworkOPsImp::getServerInfo (bool human, bool admin)
}
}
if (counters)
{
info[jss::counters] = app_.getPerfLog().countersJson();
info[jss::current_activities] = app_.getPerfLog().currentJson();
}
info[jss::pubkey_node] = toBase58 (
TokenType::NodePublic,
app_.nodeIdentity().first);
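For reference, a hedged sketch of how the two added members appear in a server_info response; the field names come from countersJson() and currentJson() later in this diff, all values are invented, and the counts/durations are strings because they are built with std::to_string:

    "counters": {
      "rpc": {
        "server_info": {"started": "4", "finished": "4", "errored": "0", "duration_us": "8125"},
        "total":       {"started": "17", "finished": "16", "errored": "1", "duration_us": "52391"}
      },
      "job_queue": {
        "clientCommand": {"queued": "12", "started": "12", "finished": "12",
                          "queued_duration_us": "310", "running_duration_us": "9042"},
        "total": { ... }
      }
    },
    "current_activities": {
      "jobs":    [{"job": "clientCommand", "duration_us": "1203"}],
      "methods": [{"method": "server_info", "duration_us": "801"}]
    }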


@@ -177,7 +177,8 @@ public:
virtual void consensusViewChange () = 0;
virtual Json::Value getConsensusInfo () = 0;
virtual Json::Value getServerInfo (bool human, bool admin) = 0;
virtual Json::Value getServerInfo (
bool human, bool admin, bool counters) = 0;
virtual void clearLedgerFetch () = 0;
virtual Json::Value getLedgerFetchInfo () = 0;


@@ -18,6 +18,7 @@
//==============================================================================
#include <ripple/app/misc/ValidatorList.h>
#include <ripple/basics/date.h>
#include <ripple/basics/Slice.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/json/json_reader.h>
@@ -490,10 +491,14 @@ ValidatorList::getJson() const
if (auto when = expires())
{
if (*when == TimeKeeper::time_point::max())
{
res[jss::validator_list_expires] = "never";
}
else
{
res[jss::validator_list_expires] = to_string(*when);
}
}
else
res[jss::validator_list_expires] = "unknown";

src/ripple/basics/PerfLog.h (new file, 170 lines)

@@ -0,0 +1,170 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2018 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_BASICS_PERFLOG_H
#define RIPPLE_BASICS_PERFLOG_H
#include <ripple/core/JobTypes.h>
#include <boost/filesystem.hpp>
#include <ripple/json/json_value.h>
#include <chrono>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
namespace beast { class Journal; }
namespace ripple {
namespace perf {
/**
* Singleton class that maintains performance counters and optionally
* writes Json-formatted data to a distinct log. It should exist prior
* to other objects launched by Application to make it accessible for
* performance logging.
*/
class PerfLog
{
public:
using steady_clock = std::chrono::steady_clock;
using system_clock = std::chrono::system_clock;
using steady_time_point = std::chrono::time_point<steady_clock>;
using system_time_point = std::chrono::time_point<system_clock>;
using seconds = std::chrono::seconds;
using milliseconds = std::chrono::milliseconds;
using microseconds = std::chrono::microseconds;
/**
* Configuration from [perf] section of rippled.cfg.
*/
struct Setup
{
boost::filesystem::path perfLog;
// log_interval is in milliseconds to support faster testing.
milliseconds logInterval {seconds(1)};
};
virtual ~PerfLog() = default;
/**
* Log start of RPC call.
*
* @param method RPC command
* @param requestId Unique identifier to track command
*/
virtual void rpcStart(std::string const& method,
std::uint64_t requestId) = 0;
/**
* Log successful finish of RPC call
*
* @param method RPC command
* @param requestId Unique identifier to track command
*/
virtual void rpcFinish(std::string const& method,
std::uint64_t requestId) = 0;
/**
* Log errored RPC call
*
* @param method RPC command
* @param requestId Unique identifier to track command
*/
virtual void rpcError(std::string const& method,
std::uint64_t requestId) = 0;
/**
* Log queued job
*
* @param type Job type
*/
virtual void jobQueue(JobType const type) = 0;
/**
* Log job executing
*
* @param type Job type
* @param dur Duration enqueued in microseconds
* @param startTime Time that execution began
* @param instance JobQueue worker thread instance
*/
virtual void jobStart(JobType const type,
microseconds dur,
steady_time_point startTime,
int instance) = 0;
/**
* Log job finishing
*
* @param type Job type
* @param dur Duration running in microseconds
* @param instance JobQueue worker thread instance
*/
virtual void jobFinish(JobType const type,
microseconds dur, int instance) = 0;
/**
* Render performance counters in Json
*
* @return Counters Json object
*/
virtual Json::Value countersJson() const = 0;
/**
* Render currently executing jobs and RPC calls and durations in Json
*
* @return Current executing jobs and RPC calls and durations
*/
virtual Json::Value currentJson() const = 0;
/**
* Ensure enough room to store each currently executing job
*
* @param resize Number of JobQueue worker threads
*/
virtual void resizeJobs(int const resize) = 0;
/**
* Rotate perf log file
*/
virtual void rotate() = 0;
};
} //perf
class Section;
class Stoppable;
namespace perf {
PerfLog::Setup setup_PerfLog(Section const& section,
boost::filesystem::path const& configDir);
std::unique_ptr<PerfLog> make_PerfLog(
PerfLog::Setup const& setup,
Stoppable& parent,
beast::Journal journal,
std::function<void()>&& signalStop);
} // perf
} // ripple
#endif //RIPPLE_BASICS_PERFLOG_H
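A minimal usage sketch of the RPC half of this interface. The wrapper function and the request-id counter are hypothetical (the real wiring lives in the RPC/ServerHandler code); only the PerfLog calls come from the declarations above:

    #include <ripple/basics/PerfLog.h>
    #include <atomic>
    #include <cstdint>
    #include <string>

    // Hypothetical helper: each call gets a unique id so rpcStart and
    // rpcFinish/rpcError can be paired.
    static std::atomic<std::uint64_t> nextRequestId {0};

    void timedInvoke (ripple::perf::PerfLog& perfLog, std::string const& method)
    {
        // `method` must be one of the registered RPC handler names;
        // PerfLogImp seeds its table from RPC::getHandlerNames().
        auto const id = ++nextRequestId;
        perfLog.rpcStart (method, id);       // counts "started", records start time
        try
        {
            // ... run the handler here ...
            perfLog.rpcFinish (method, id);  // counts "finished", accumulates duration
        }
        catch (...)
        {
            perfLog.rpcError (method, id);   // counts "errored", accumulates duration
            throw;
        }
    }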


@@ -31,14 +31,11 @@ namespace ripple {
// A few handy aliases
using days = std::chrono::duration
<int, std::ratio_multiply<
std::chrono::hours::period,
std::ratio<24>>>;
using days = std::chrono::duration<
int, std::ratio_multiply<std::chrono::hours::period, std::ratio<24>>>;
using weeks = std::chrono::duration
<int, std::ratio_multiply<
days::period, std::ratio<7>>>;
using weeks = std::chrono::duration<
int, std::ratio_multiply<days::period, std::ratio<7>>>;
/** Clock for measuring Ripple Network Time.
@@ -58,21 +55,30 @@ public:
static bool const is_steady = false;
};
std::string to_string(NetClock::time_point tp);
std::string to_string(std::chrono::system_clock::time_point tp);
template <class Duration>
std::string
to_string(date::sys_time<Duration> tp)
{
return date::format("%Y-%b-%d %T", tp);
}
inline
std::string
to_string(NetClock::time_point tp)
{
using namespace std::chrono;
return to_string(
system_clock::time_point{tp.time_since_epoch() + 946684800s});
}
/** A clock for measuring elapsed time.
The epoch is unspecified.
*/
using Stopwatch =
beast::abstract_clock<
std::chrono::steady_clock>;
using Stopwatch = beast::abstract_clock<std::chrono::steady_clock>;
/** A manual Stopwatch for unit tests. */
using TestStopwatch =
beast::manual_clock<
std::chrono::steady_clock>;
using TestStopwatch = beast::manual_clock<std::chrono::steady_clock>;
/** Returns an instance of a wall clock. */
inline
@@ -81,8 +87,7 @@ stopwatch()
{
return beast::get_abstract_clock<
std::chrono::steady_clock,
beast::basic_seconds_clock<
std::chrono::steady_clock>>();
beast::basic_seconds_clock<std::chrono::steady_clock>>();
}
} // ripple
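The templated to_string above is what provides the optional sub-second rendering mentioned in the commit message: the precision of the Duration carried by the time point determines how the %T field is printed. A hedged example (output values illustrative); this is the same form NetworkOPs now uses for the server_info "time" field:

    using namespace std::chrono;
    auto const now = system_clock::now();
    to_string(date::floor<microseconds>(now)); // e.g. "2018-Jan-13 12:02:43.123456"
    to_string(date::floor<seconds>(now));      // e.g. "2018-Jan-13 12:02:43"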

src/ripple/basics/date.h (new file, 8010 lines)

File diff suppressed because it is too large.


@@ -0,0 +1,528 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2018 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/basics/BasicConfig.h>
#include <ripple/basics/impl/PerfLogImp.h>
#include <ripple/beast/core/CurrentThreadName.h>
#include <ripple/beast/utility/Journal.h>
#include <ripple/json/json_writer.h>
#include <ripple/json/to_string.h>
#include <boost/optional.hpp>
#include <atomic>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <iterator>
#include <sstream>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <utility>
namespace ripple {
namespace perf {
PerfLogImp::Counters::Counters(std::vector<char const*> const& labels,
JobTypes const& jobTypes)
{
{
// populateRpc
rpc_.reserve(labels.size());
for (std::string const label : labels)
{
auto const elem = rpc_.emplace(label, Rpc());
if (!elem.second)
{
// Ensure that no other function populates this entry.
assert(false);
}
}
}
{
// populateJq
jq_.reserve(jobTypes.size());
for (auto const& job : jobTypes)
{
auto const elem = jq_.emplace(job.first, Jq(job.second.name()));
if (!elem.second)
{
// Ensure that no other function populates this entry.
assert(false);
}
}
}
}
Json::Value
PerfLogImp::Counters::countersJson() const
{
Json::Value rpcobj(Json::objectValue);
// totalRpc represents all rpc methods. All that started, finished, etc.
Rpc totalRpc;
for (auto const& proc : rpc_)
{
Json::Value p(Json::objectValue);
{
auto const sync =
[&proc]() ->boost::optional<Counters::Rpc::Sync> {
std::lock_guard<std::mutex> lock(proc.second.mut);
if (!proc.second.sync.started &&
!proc.second.sync.finished &&
!proc.second.sync.errored)
{
return boost::none;
}
return proc.second.sync;
}();
if (!sync)
continue;
p[jss::started] = std::to_string(sync->started);
totalRpc.sync.started += sync->started;
p[jss::finished] = std::to_string(sync->finished);
totalRpc.sync.finished += sync->finished;
p[jss::errored] = std::to_string(sync->errored);
totalRpc.sync.errored += sync->errored;
p[jss::duration_us] = std::to_string(sync->duration.count());
totalRpc.sync.duration += sync->duration;
}
rpcobj[proc.first] = p;
}
if (totalRpc.sync.started)
{
Json::Value totalRpcJson(Json::objectValue);
totalRpcJson[jss::started] = std::to_string(totalRpc.sync.started);
totalRpcJson[jss::finished] = std::to_string(totalRpc.sync.finished);
totalRpcJson[jss::errored] = std::to_string(totalRpc.sync.errored);
totalRpcJson[jss::duration_us] = std::to_string(
totalRpc.sync.duration.count());
rpcobj[jss::total] = totalRpcJson;
}
Json::Value jqobj(Json::objectValue);
// totalJq represents all jobs. All enqueued, started, finished, etc.
Jq totalJq("total");
for (auto const& proc : jq_)
{
Json::Value j(Json::objectValue);
{
auto const sync =
[&proc]() ->boost::optional<Counters::Jq::Sync> {
std::lock_guard<std::mutex> lock(proc.second.mut);
if (!proc.second.sync.queued &&
!proc.second.sync.started &&
!proc.second.sync.finished)
{
return boost::none;
}
return proc.second.sync;
}();
if (!sync)
continue;
j[jss::queued] = std::to_string(sync->queued);
totalJq.sync.queued += sync->queued;
j[jss::started] = std::to_string(sync->started);
totalJq.sync.started += sync->started;
j[jss::finished] = std::to_string(sync->finished);
totalJq.sync.finished += sync->finished;
j[jss::queued_duration_us] = std::to_string(
sync->queuedDuration.count());
totalJq.sync.queuedDuration += sync->queuedDuration;
j[jss::running_duration_us] = std::to_string(
sync->runningDuration.count());
totalJq.sync.runningDuration += sync->runningDuration;
}
jqobj[proc.second.label] = j;
}
if (totalJq.sync.queued)
{
Json::Value totalJqJson(Json::objectValue);
totalJqJson[jss::queued] = std::to_string(totalJq.sync.queued);
totalJqJson[jss::started] = std::to_string(totalJq.sync.started);
totalJqJson[jss::finished] = std::to_string(totalJq.sync.finished);
totalJqJson[jss::queued_duration_us] = std::to_string(
totalJq.sync.queuedDuration.count());
totalJqJson[jss::running_duration_us] = std::to_string(
totalJq.sync.runningDuration.count());
jqobj[jss::total] = totalJqJson;
}
Json::Value counters(Json::objectValue);
// Be kind to reporting tools and let them expect rpc and jq objects
// even if empty.
counters[jss::rpc] = rpcobj;
counters[jss::job_queue] = jqobj;
return counters;
}
Json::Value
PerfLogImp::Counters::currentJson() const
{
auto const present = steady_clock::now();
Json::Value jobsArray(Json::arrayValue);
auto const jobs = [this]{
std::lock_guard<std::mutex> lock(jobsMutex_);
return jobs_;
}();
for (auto const& j : jobs)
{
if (j.first == jtINVALID)
continue;
Json::Value jobj(Json::objectValue);
auto const e = jq_.find(j.first);
if (e == jq_.end())
{
assert(false);
continue;
}
// label is const and created before multi-threading so needs no lock.
jobj[jss::job] = e->second.label;
jobj[jss::duration_us] = std::to_string(
std::chrono::duration_cast<microseconds>(
present - j.second).count());
jobsArray.append(jobj);
}
Json::Value methodsArray(Json::arrayValue);
std::vector<MethodStart> methods;
{
std::lock_guard<std::mutex> lock(methodsMutex_);
methods.reserve(methods_.size());
for (auto const& m : methods_)
methods.push_back(m.second);
}
for (auto m : methods)
{
Json::Value methodobj(Json::objectValue);
methodobj[jss::method] = m.first;
methodobj[jss::duration_us] = std::to_string(
std::chrono::duration_cast<microseconds>(
present - m.second).count());
methodsArray.append(methodobj);
}
Json::Value current(Json::objectValue);
current[jss::jobs] = jobsArray;
current[jss::methods] = methodsArray;
return current;
}
//-----------------------------------------------------------------------------
void
PerfLogImp::openLog()
{
if (! setup_.perfLog.empty())
{
if (logFile_.is_open())
logFile_.close();
auto logDir = setup_.perfLog.parent_path();
if (!boost::filesystem::is_directory(logDir))
{
boost::system::error_code ec;
boost::filesystem::create_directories(logDir, ec);
if (ec)
{
JLOG(j_.fatal()) << "Unable to create performance log "
"directory " << logDir << ": " << ec.message();
signalStop_();
return;
}
}
logFile_.open(setup_.perfLog.c_str(), std::ios::out | std::ios::app);
if (! logFile_)
{
JLOG(j_.fatal()) << "Unable to open performance log " <<
setup_.perfLog << ".";
signalStop_();
}
}
}
void
PerfLogImp::run()
{
beast::setCurrentThreadName("perflog");
lastLog_ = system_clock::now();
while (true)
{
{
std::unique_lock<std::mutex> lock(mutex_);
if (stop_)
{
return;
}
if (rotate_)
{
openLog();
rotate_ = false;
}
cond_.wait_until (lock, lastLog_ + setup_.logInterval);
}
report();
}
}
void
PerfLogImp::report()
{
if (! logFile_)
// If logFile_ is not writable do no further work.
return;
auto const present = system_clock::now();
if (present < lastLog_ + setup_.logInterval)
return;
lastLog_ = present;
Json::Value report(Json::objectValue);
report[jss::time] = to_string(date::floor<microseconds>(present));
report[jss::workers] = counters_.workers_;
report[jss::hostid] = hostname_;
report[jss::counters] = counters_.countersJson();
report[jss::current_activities] = counters_.currentJson();
logFile_ << Json::Compact{std::move(report)} << std::endl;
}
PerfLogImp::PerfLogImp(Setup const& setup,
Stoppable& parent,
beast::Journal journal,
std::function<void()>&& signalStop)
: Stoppable ("PerfLogImp", parent)
, setup_ (setup)
, j_ (journal)
, signalStop_ (std::move (signalStop))
{
openLog();
}
PerfLogImp::~PerfLogImp()
{
onStop();
}
void
PerfLogImp::rpcStart(std::string const& method, std::uint64_t const requestId)
{
auto counter = counters_.rpc_.find(method);
if (counter == counters_.rpc_.end())
{
assert(false);
return;
}
{
std::lock_guard<std::mutex> lock(counter->second.mut);
++counter->second.sync.started;
}
std::lock_guard<std::mutex> lock(counters_.methodsMutex_);
counters_.methods_[requestId] = {
counter->first.c_str(),
steady_clock::now()
};
}
void
PerfLogImp::rpcEnd(std::string const& method,
std::uint64_t const requestId,
bool finish)
{
auto counter = counters_.rpc_.find(method);
if (counter == counters_.rpc_.end())
{
assert(false);
return;
}
steady_time_point startTime;
{
std::lock_guard<std::mutex> lock(counters_.methodsMutex_);
auto const e = counters_.methods_.find(requestId);
if (e != counters_.methods_.end())
{
startTime = e->second.second;
counters_.methods_.erase(e);
}
else
{
assert(false);
}
}
std::lock_guard<std::mutex> lock(counter->second.mut);
if (finish)
++counter->second.sync.finished;
else
++counter->second.sync.errored;
counter->second.sync.duration +=
std::chrono::duration_cast<microseconds>(
steady_clock::now() - startTime);
}
void
PerfLogImp::jobQueue(JobType const type)
{
auto counter = counters_.jq_.find(type);
if (counter == counters_.jq_.end())
{
assert(false);
return;
}
std::lock_guard<std::mutex> lock(counter->second.mut);
++counter->second.sync.queued;
}
void
PerfLogImp::jobStart(JobType const type,
microseconds dur,
steady_time_point startTime,
int instance)
{
auto counter = counters_.jq_.find(type);
if (counter == counters_.jq_.end())
{
assert(false);
return;
}
{
std::lock_guard<std::mutex> lock(counter->second.mut);
++counter->second.sync.started;
counter->second.sync.queuedDuration += dur;
}
std::lock_guard<std::mutex> lock(counters_.jobsMutex_);
if (instance >= 0 && instance < counters_.jobs_.size())
counters_.jobs_[instance] = {type, startTime};
}
void
PerfLogImp::jobFinish(JobType const type, microseconds dur,
int instance)
{
auto counter = counters_.jq_.find(type);
if (counter == counters_.jq_.end())
{
assert(false);
return;
}
{
std::lock_guard<std::mutex> lock(counter->second.mut);
++counter->second.sync.finished;
counter->second.sync.runningDuration += dur;
}
std::lock_guard<std::mutex> lock(counters_.jobsMutex_);
if (instance >= 0 && instance < counters_.jobs_.size())
counters_.jobs_[instance] = {jtINVALID, steady_time_point()};
}
void
PerfLogImp::resizeJobs(int const resize)
{
std::lock_guard<std::mutex> lock(counters_.jobsMutex_);
counters_.workers_ = resize;
if (resize > counters_.jobs_.size())
counters_.jobs_.resize(resize, {jtINVALID, steady_time_point()});
}
void
PerfLogImp::rotate()
{
if (setup_.perfLog.empty())
return;
std::lock_guard<std::mutex> lock(mutex_);
rotate_ = true;
cond_.notify_one();
}
void
PerfLogImp::onStart()
{
if (setup_.perfLog.size())
thread_ = std::thread(&PerfLogImp::run, this);
}
void
PerfLogImp::onStop()
{
if (thread_.joinable())
{
{
std::lock_guard<std::mutex> lock(mutex_);
stop_ = true;
cond_.notify_one();
}
thread_.join();
}
if (areChildrenStopped())
stopped();
}
void
PerfLogImp::onChildrenStopped()
{
onStop();
}
//-----------------------------------------------------------------------------
PerfLog::Setup
setup_PerfLog(Section const& section, boost::filesystem::path const& configDir)
{
PerfLog::Setup setup;
std::string perfLog;
set(perfLog, "perf_log", section);
if (perfLog.size())
{
setup.perfLog = boost::filesystem::path(perfLog);
if (setup.perfLog.is_relative())
{
setup.perfLog = boost::filesystem::absolute(
setup.perfLog, configDir);
}
}
std::uint64_t logInterval;
if (get_if_exists(section, "log_interval", logInterval))
setup.logInterval = std::chrono::seconds(logInterval);
return setup;
}
std::unique_ptr<PerfLog>
make_PerfLog(
PerfLog::Setup const& setup,
Stoppable& parent,
beast::Journal journal,
std::function<void()>&& signalStop)
{
return std::make_unique<PerfLogImp>(
setup, parent, journal, std::move(signalStop));
}
} // perf
} // ripple
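Putting report() together: every logInterval the dedicated thread appends one compact Json object per line to the configured perf_log. A hedged illustration of a single record (keys from report() above, values invented):

    {"time":"2018-Jan-13 12:02:43.123456","workers":6,"hostid":"host1","counters":{"rpc":{...},"job_queue":{...}},"current_activities":{"jobs":[...],"methods":[...]}}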


@@ -0,0 +1,219 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2018 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef RIPPLE_BASICS_PERFLOGIMP_H
#define RIPPLE_BASICS_PERFLOGIMP_H
#include <ripple/basics/chrono.h>
#include <ripple/basics/PerfLog.h>
#include <ripple/beast/utility/Journal.h>
#include <ripple/core/Stoppable.h>
#include <ripple/protocol/JsonFields.h>
#include <ripple/rpc/impl/Handler.h>
#include <boost/asio/ip/host_name.hpp>
#include <condition_variable>
#include <cstdint>
#include <fstream>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>
namespace ripple {
namespace perf {
/**
* Implementation class for PerfLog.
*/
class PerfLogImp
: public PerfLog, Stoppable
{
/**
* Track performance counters and currently executing tasks.
*/
struct Counters
{
public:
using MethodStart = std::pair<char const*, steady_time_point>;
/**
* RPC performance counters.
*/
struct Rpc
{
// Keep all items that need to be synchronized in one place
// to minimize copy overhead while locked.
struct Sync
{
// Counters for each time a method starts and then either
// finishes successfully or with an exception.
std::uint64_t started {0};
std::uint64_t finished {0};
std::uint64_t errored {0};
// Cumulative duration of all finished and errored method calls.
microseconds duration {0};
};
Sync sync;
mutable std::mutex mut;
Rpc() = default;
Rpc(Rpc const& orig)
: sync (orig.sync)
{}
};
/**
* Job Queue task performance counters.
*/
struct Jq
{
// Keep all items that need to be synchronized in one place
// to minimize copy overhead while locked.
struct Sync
{
// Counters for each time a job is enqueued, begins to run,
// finishes.
std::uint64_t queued {0};
std::uint64_t started {0};
std::uint64_t finished {0};
// Cumulative duration of all jobs' queued and running times.
microseconds queuedDuration {0};
microseconds runningDuration {0};
};
Sync sync;
std::string const label;
mutable std::mutex mut;
Jq(std::string const& labelArg)
: label (labelArg)
{}
Jq(Jq const& orig)
: sync (orig.sync)
, label (orig.label)
{}
};
// rpc_ and jq_ do not need mutex protection because all
// keys and values are created before more threads are started.
std::unordered_map<std::string, Rpc> rpc_;
std::unordered_map<std::underlying_type_t<JobType>, Jq> jq_;
std::vector<std::pair<JobType, steady_time_point>> jobs_;
int workers_ {0};
mutable std::mutex jobsMutex_;
std::unordered_map<std::uint64_t, MethodStart> methods_;
mutable std::mutex methodsMutex_;
Counters(std::vector<char const*> const& labels,
JobTypes const& jobTypes);
Json::Value countersJson() const;
Json::Value currentJson() const;
};
Setup const setup_;
beast::Journal j_;
std::function<void()> const signalStop_;
Counters counters_ {ripple::RPC::getHandlerNames(), JobTypes::instance()};
std::ofstream logFile_;
std::thread thread_;
std::mutex mutex_;
std::condition_variable cond_;
system_time_point lastLog_;
std::string const hostname_ {boost::asio::ip::host_name()};
bool stop_ {false};
bool rotate_ {false};
void openLog();
void run();
void report();
void rpcEnd(std::string const& method,
std::uint64_t const requestId,
bool finish);
public:
PerfLogImp(Setup const& setup,
Stoppable& parent,
beast::Journal journal,
std::function<void()>&& signalStop);
~PerfLogImp() override;
void rpcStart(
std::string const& method, std::uint64_t const requestId) override;
void rpcFinish(
std::string const& method,
std::uint64_t const requestId) override
{
rpcEnd(method, requestId, true);
}
void rpcError(std::string const& method,
std::uint64_t const requestId) override
{
rpcEnd(method, requestId, false);
}
void jobQueue(JobType const type) override;
void jobStart(
JobType const type,
microseconds dur,
steady_time_point startTime,
int instance) override;
void jobFinish(
JobType const type,
microseconds dur,
int instance) override;
Json::Value
countersJson() const override
{
return counters_.countersJson();
}
Json::Value
currentJson() const override
{
return counters_.currentJson();
}
void resizeJobs(int const resize) override;
void rotate() override;
// Stoppable
void onPrepare() override {}
// Called when application is ready to start threads.
void onStart() override;
// Called when the application begins shutdown.
void onStop() override;
// Called when all child Stoppable objects have stopped.
void onChildrenStopped() override;
};
} // perf
} // ripple
#endif //RIPPLE_BASICS_PERFLOGIMP_H
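A sketch of the jobs_ indexing contract behind currentJson(): Workers reports its thread count through resizeJobs, and each worker passes its instance index so that jobs_[instance] always reflects what that thread is running. The function below and its literal durations are illustrative only; the real call sites are in Workers.cpp and JobQueue.cpp later in this diff:

    #include <ripple/basics/PerfLog.h>
    #include <chrono>

    void illustrateJobTracking (ripple::perf::PerfLog& perfLog,
        int numberOfThreads, ripple::JobType type, int instance)
    {
        using namespace std::chrono;
        perfLog.resizeJobs (numberOfThreads);   // one jobs_ slot per worker thread
        perfLog.jobQueue (type);                // task enqueued
        auto const start = steady_clock::now();
        perfLog.jobStart (type, microseconds{50}, start, instance);
        // jobs_[instance] is now {type, start}
        perfLog.jobFinish (type, microseconds{1200}, instance);
        // the slot is reset to jtINVALID
    }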


@@ -1,82 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of rippled: https://github.com/ripple/rippled
Copyright (c) 2012, 2013 Ripple Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/beast/clock/chrono_util.h>
#include <ripple/basics/chrono.h>
#include <iomanip>
#include <sstream>
#include <tuple>
namespace ripple {
static
std::tuple<int, unsigned, unsigned>
civil_from_days(int z) noexcept
{
z += 719468;
const int era = (z >= 0 ? z : z - 146096) / 146097;
const unsigned doe = static_cast<unsigned>(z - era * 146097); // [0, 146096]
const unsigned yoe = (doe - doe/1460 + doe/36524 - doe/146096) / 365; // [0, 399]
const int y = static_cast<int>(yoe) + era * 400;
const unsigned doy = doe - (365*yoe + yoe/4 - yoe/100); // [0, 365]
const unsigned mp = (5*doy + 2)/153; // [0, 11]
const unsigned d = doy - (153*mp+2)/5 + 1; // [1, 31]
const unsigned m = mp + (mp < 10 ? 3 : -9); // [1, 12]
return std::tuple<int, unsigned, unsigned>(y + (m <= 2), m, d);
}
std::string
to_string(std::chrono::system_clock::time_point tp)
{
const char* months[] =
{
"Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
};
using namespace std::chrono;
auto s = floor<seconds>(tp.time_since_epoch());
auto sd = floor<days>(s); // number of days
s -= sd; // time of day in seconds
auto h = floor<hours>(s);
s -= h;
auto m = floor<minutes>(s);
s -= m;
int y;
unsigned mn, d;
std::tie(y, mn, d) = civil_from_days(static_cast<int>(sd.count()));
// Date-time in y/mn/d h:m:s
std::ostringstream str;
str.fill('0');
str.flags(std::ios::dec | std::ios::right);
using std::setw;
str << y << '-' << months[mn-1] << '-' << setw(2) << d << ' '
<< setw(2) << h.count() << ':'
<< setw(2) << m.count() << ':'
<< setw(2) << s.count();
return str.str();
}
std::string
to_string(NetClock::time_point tp)
{
using namespace std::chrono;
return to_string(system_clock::time_point{tp.time_since_epoch() + 946684800s});
}
} // ripple


@@ -20,7 +20,7 @@
#ifndef BEAST_CHRONO_BASIC_SECONDS_CLOCK_H_INCLUDED
#define BEAST_CHRONO_BASIC_SECONDS_CLOCK_H_INCLUDED
#include <ripple/beast/clock/chrono_util.h>
#include <ripple/basics/date.h>
#include <algorithm>
#include <chrono>
@@ -108,7 +108,7 @@ public:
using namespace std::chrono;
clock_type::time_point const when (
floor <seconds> (
date::floor <seconds> (
clock_type::now().time_since_epoch()) +
seconds (1));


@@ -1,70 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_CHRONO_CHRONO_UTIL_H_INCLUDED
#define BEAST_CHRONO_CHRONO_UTIL_H_INCLUDED
#include <chrono>
// From Howard Hinnant
// http://home.roadrunner.com/~hinnant/duration_io/chrono_util.html
#if !defined(_MSC_FULL_VER) || (_MSC_FULL_VER <= 190023506)
// round down
template <class To, class Rep, class Period>
To floor(std::chrono::duration <Rep, Period> const& d)
{
To t = std::chrono::duration_cast<To>(d);
if (t > d)
--t;
return t;
}
// round to nearest, to even on tie
template <class To, class Rep, class Period>
To round (std::chrono::duration <Rep, Period> const& d)
{
To t0 = std::chrono::duration_cast<To>(d);
To t1 = t0;
++t1;
auto diff0 = d - t0;
auto diff1 = t1 - d;
if (diff0 == diff1)
{
if (t0.count() & 1)
return t1;
return t0;
}
else if (diff0 < diff1)
return t0;
return t1;
}
// round up
template <class To, class Rep, class Period>
To ceil (std::chrono::duration <Rep, Period> const& d)
{
To t = std::chrono::duration_cast<To>(d);
if (t < d)
++t;
return t;
}
#endif
#endif


@@ -23,7 +23,7 @@
#include <ripple/beast/insight/Base.h>
#include <ripple/beast/insight/EventImpl.h>
#include <ripple/beast/clock/chrono_util.h>
#include <ripple/basics/date.h>
#include <chrono>
#include <memory>
@@ -67,7 +67,7 @@ public:
{
using namespace std::chrono;
if (m_impl)
m_impl->notify (ceil <value_type> (value));
m_impl->notify (date::ceil <value_type> (value));
}
std::shared_ptr <EventImpl> const& impl () const


@@ -89,7 +89,9 @@ public:
private:
boost::filesystem::path CONFIG_FILE;
public:
boost::filesystem::path CONFIG_DIR;
private:
boost::filesystem::path DEBUG_LOGFILE;
void load ();


@@ -31,6 +31,11 @@
namespace ripple {
namespace perf
{
class PerfLog;
}
class Logs;
struct Coro_create_t
{
@@ -132,7 +137,8 @@ public:
using JobFunction = std::function <void(Job&)>;
JobQueue (beast::insight::Collector::ptr const& collector,
Stoppable& parent, beast::Journal journal, Logs& logs);
Stoppable& parent, beast::Journal journal, Logs& logs,
perf::PerfLog& perfLog);
~JobQueue ();
/** Adds a job to the JobQueue.
@@ -226,18 +232,13 @@ private:
Job::CancelCallback m_cancelCallback;
// Statistics tracking
perf::PerfLog& perfLog_;
beast::insight::Collector::ptr m_collector;
beast::insight::Gauge job_count;
beast::insight::Hook hook;
std::condition_variable cv_;
static JobTypes const& getJobTypes()
{
static JobTypes types;
return types;
}
void collect();
JobTypeData& getJobTypeData (JobType type);
@@ -304,14 +305,6 @@ private:
// <none>
void finishJob (JobType type);
template <class Rep, class Period>
void on_dequeue (JobType type,
std::chrono::duration <Rep, Period> const& value);
template <class Rep, class Period>
void on_execute (JobType type,
std::chrono::duration <Rep, Period> const& value);
// Runs the next appropriate waiting Job.
//
// Pre-conditions:
@@ -322,7 +315,7 @@ private:
//
// Invariants:
// <none>
void processTask () override;
void processTask (int instance) override;
// Returns the limit of running jobs for the given job type.
// For jobs with no limit, we return the largest int. Hopefully that


@@ -48,7 +48,7 @@ public:
bool special, std::chrono::milliseconds avgLatency,
std::chrono::milliseconds peakLatency)
: m_type (type)
, m_name (name)
, m_name (std::move(name))
, m_limit (limit)
, m_special (special)
, m_avgLatency (avgLatency)
@@ -62,7 +62,7 @@ public:
return m_type;
}
std::string name () const
std::string const& name () const
{
return m_name;
}


@@ -23,6 +23,9 @@
#include <ripple/core/Job.h>
#include <ripple/core/JobTypeInfo.h>
#include <map>
#include <string>
#include <type_traits>
#include <unordered_map>
namespace ripple
{
@@ -33,6 +36,7 @@ public:
using Map = std::map <JobType, JobTypeInfo>;
using const_iterator = Map::const_iterator;
private:
JobTypes ()
: m_unknown (jtINVALID, "invalid", 0, true, std::chrono::milliseconds{0},
std::chrono::milliseconds{0})
@@ -79,6 +83,13 @@ add( jtNS_WRITE, "WriteNode", 0, true, 0ms, 0m
}
public:
static JobTypes const& instance()
{
static JobTypes const types;
return types;
}
JobTypeInfo const& get (JobType jt) const
{
Map::const_iterator const iter (m_map.find (jt));
@@ -95,6 +106,11 @@ add( jtNS_WRITE, "WriteNode", 0, true, 0ms, 0m
return m_unknown;
}
Map::size_type size () const
{
return m_map.size();
}
const_iterator begin () const
{
return m_map.cbegin ();
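The new instance() accessor gives shared, read-only access to the job-type table; a small sketch of iterating it (this mirrors what PerfLogImp's Counters constructor does elsewhere in this diff; the function name is illustrative):

    #include <ripple/core/JobTypes.h>
    #include <string>

    inline void walkJobTypes ()
    {
        for (auto const& entry : ripple::JobTypes::instance())
        {
            ripple::JobType const type = entry.first;
            std::string const& name = entry.second.name();  // e.g. "clientCommand"
            (void) type; (void) name;
        }
    }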


@@ -130,7 +130,7 @@ class RootStoppable;
For stoppables which are only considered stopped when all of their
children have stopped, and their own internal logic indicates a stop, it
will be necessary to perform special actions in onChildrenStopped(). The
funtion areChildrenStopped() can be used after children have stopped,
function areChildrenStopped() can be used after children have stopped,
but before the Stoppable logic itself has stopped, to determine if the
stoppable's logic is a true stop.


@@ -19,18 +19,21 @@
#include <ripple/core/JobQueue.h>
#include <ripple/basics/contract.h>
#include <ripple/basics/PerfLog.h>
namespace ripple {
JobQueue::JobQueue (beast::insight::Collector::ptr const& collector,
Stoppable& parent, beast::Journal journal, Logs& logs)
Stoppable& parent, beast::Journal journal, Logs& logs,
perf::PerfLog& perfLog)
: Stoppable ("JobQueue", parent)
, m_journal (journal)
, m_lastJob (0)
, m_invalidJobData (getJobTypes ().getInvalid (), collector, logs)
, m_invalidJobData (JobTypes::instance().getInvalid (), collector, logs)
, m_processCount (0)
, m_workers (*this, "JobQueue", 0)
, m_workers (*this, perfLog, "JobQueue", 0)
, m_cancelCallback (std::bind (&Stoppable::isStopping, this))
, perfLog_ (perfLog)
, m_collector (collector)
{
hook = m_collector->make_hook (std::bind (&JobQueue::collect, this));
@@ -39,7 +42,7 @@ JobQueue::JobQueue (beast::insight::Collector::ptr const& collector,
{
std::lock_guard <std::mutex> lock (m_mutex);
for (auto const& x : getJobTypes ())
for (auto const& x : JobTypes::instance())
{
JobTypeInfo const& jt = x.second;
@@ -329,6 +332,7 @@ JobQueue::queueJob (Job const& job, std::lock_guard <std::mutex> const& lock)
JobType const type (job.getType ());
assert (type != jtINVALID);
assert (m_jobSet.find (job) != m_jobSet.end ());
perfLog_.jobQueue(type);
JobTypeData& data (getJobTypeData (type));
@@ -398,34 +402,13 @@ JobQueue::finishJob (JobType type)
--data.running;
}
template <class Rep, class Period>
void JobQueue::on_dequeue (JobType type,
std::chrono::duration <Rep, Period> const& value)
{
using namespace std::chrono;
auto const ms = ceil<milliseconds>(value);
if (ms >= 10ms)
getJobTypeData (type).dequeue.notify (ms);
}
template <class Rep, class Period>
void JobQueue::on_execute (JobType type,
std::chrono::duration <Rep, Period> const& value)
{
using namespace std::chrono;
auto const ms (ceil <milliseconds> (value));
if (ms >= 10ms)
getJobTypeData (type).execute.notify (ms);
}
void
JobQueue::processTask ()
JobQueue::processTask (int instance)
{
JobType type;
{
using namespace std::chrono;
Job::clock_type::time_point const start_time (
Job::clock_type::now());
{
@@ -438,10 +421,18 @@ JobQueue::processTask ()
type = job.getType();
JobTypeData& data(getJobTypeData(type));
JLOG(m_journal.trace()) << "Doing " << data.name () << " job";
on_dequeue (job.getType (), start_time - job.queue_time ());
auto const us = date::ceil<microseconds>(
start_time - job.queue_time());
perfLog_.jobStart(type, us, start_time, instance);
if (us >= 10ms)
getJobTypeData(type).dequeue.notify(us);
job.doJob ();
}
on_execute(type, Job::clock_type::now() - start_time);
auto const us (
date::ceil<microseconds>(Job::clock_type::now() - start_time));
perfLog_.jobFinish(type, us, instance);
if (us >= 10ms)
getJobTypeData(type).execute.notify(us);
}
{
@@ -462,7 +453,7 @@ JobQueue::processTask ()
int
JobQueue::getJobLimit (JobType type)
{
JobTypeInfo const& j (getJobTypes ().get (type));
JobTypeInfo const& j (JobTypes::instance().get (type));
assert (j.type () != jtINVALID);
return j.limit ();


@@ -19,7 +19,7 @@
#include <ripple/basics/Log.h>
#include <ripple/basics/UptimeTimer.h>
#include <ripple/beast/clock/chrono_util.h>
#include <ripple/basics/date.h>
#include <ripple/core/LoadMonitor.h>
namespace ripple {
@@ -108,14 +108,15 @@ void LoadMonitor::addLoadSample (LoadEvent const& s)
auto const total = s.runTime() + s.waitTime();
// Don't include "jitter" as part of the latency
auto const latency = total < 2ms ? 0ms : round<milliseconds>(total);
auto const latency = total < 2ms ? 0ms : date::round<milliseconds>(total);
if (latency > 500ms)
{
auto mj = (latency > 1s) ? j_.warn() : j_.info();
JLOG (mj) << "Job: " << s.name() <<
" run: " << round<milliseconds>(s.runTime()).count() << "ms" <<
" wait: " << round<milliseconds>(s.waitTime()).count() << "ms";
" run: " << date::round<milliseconds>(s.runTime()).count() <<
"ms" << " wait: " <<
date::round<milliseconds>(s.waitTime()).count() << "ms";
}
addSamples (1, latency);


@@ -18,6 +18,7 @@
//==============================================================================
#include <ripple/core/impl/Workers.h>
#include <ripple/basics/PerfLog.h>
#include <ripple/beast/core/CurrentThreadName.h>
#include <cassert>
@@ -25,9 +26,11 @@ namespace ripple {
Workers::Workers (
Callback& callback,
perf::PerfLog& perfLog,
std::string const& threadNames,
int numberOfThreads)
: m_callback (callback)
, perfLog_ (perfLog)
, m_threadNames (threadNames)
, m_allPaused (true, true)
, m_semaphore (0)
@@ -57,12 +60,14 @@ int Workers::getNumberOfThreads () const noexcept
//
void Workers::setNumberOfThreads (int numberOfThreads)
{
static int instance {0};
if (m_numberOfThreads != numberOfThreads)
{
perfLog_.resizeJobs(numberOfThreads);
if (numberOfThreads > m_numberOfThreads)
{
// Increasing the number of working threads
int const amount = numberOfThreads - m_numberOfThreads;
for (int i = 0; i < amount; ++i)
@@ -79,15 +84,14 @@ void Workers::setNumberOfThreads (int numberOfThreads)
}
else
{
worker = new Worker (*this, m_threadNames);
worker = new Worker (*this, m_threadNames, instance++);
m_everyone.push_front (worker);
}
}
}
else if (numberOfThreads < m_numberOfThreads)
else
{
// Decreasing the number of working threads
int const amount = m_numberOfThreads - numberOfThreads;
for (int i = 0; i < amount; ++i)
@@ -142,9 +146,11 @@ void Workers::deleteWorkers (beast::LockFreeStack <Worker>& stack)
//------------------------------------------------------------------------------
Workers::Worker::Worker (Workers& workers, std::string const& threadName)
Workers::Worker::Worker (Workers& workers, std::string const& threadName,
int const instance)
: m_workers {workers}
, threadName_ {threadName}
, instance_ {instance}
, wakeCount_ {0}
, shouldExit_ {false}
{
@@ -216,7 +222,7 @@ void Workers::Worker::run ()
// unblocked in order to process a task.
//
++m_workers.m_runningTaskCount;
m_workers.m_callback.processTask ();
m_workers.m_callback.processTask (instance_);
--m_workers.m_runningTaskCount;
}


@@ -31,6 +31,11 @@
namespace ripple {
namespace perf
{
class PerfLog;
}
/** A group of threads that process tasks.
*/
class Workers
@@ -45,9 +50,11 @@ public:
that you only process one task from inside your callback. Each
call to addTask will result in exactly one call to processTask.
@param instance The worker thread instance.
@see Workers::addTask
*/
virtual void processTask () = 0;
virtual void processTask (int instance) = 0;
};
/** Create the object.
@@ -58,6 +65,7 @@ public:
@param threadNames The name given to each created worker thread.
*/
explicit Workers (Callback& callback,
perf::PerfLog& perfLog,
std::string const& threadNames = "Worker",
int numberOfThreads =
static_cast<int>(std::thread::hardware_concurrency()));
@@ -126,7 +134,9 @@ private:
, public beast::LockFreeStack <Worker, PausedTag>::Node
{
public:
Worker (Workers& workers, std::string const& threadName);
Worker (Workers& workers,
std::string const& threadName,
int const instance);
~Worker ();
@@ -138,6 +148,7 @@ private:
private:
Workers& m_workers;
std::string const threadName_;
int const instance_;
std::thread thread_;
std::mutex mutex_;
@@ -151,6 +162,7 @@ private:
private:
Callback& m_callback;
perf::PerfLog& perfLog_;
std::string m_threadNames; // The name to give each thread
beast::WaitableEvent m_allPaused; // signaled when all threads paused
semaphore m_semaphore; // each pending task is 1 resource


@@ -1013,6 +1013,15 @@ private:
return jvRequest;
}
// server_info [counters]
Json::Value parseServerInfo (Json::Value const& jvParams)
{
Json::Value jvRequest (Json::objectValue);
if (jvParams.size() == 1 && jvParams[0u].asString() == "counters")
jvRequest[jss::counters] = true;
return jvRequest;
}
public:
//--------------------------------------------------------------------------
@@ -1088,8 +1097,8 @@ public:
{ "sign_for", &RPCParser::parseSignFor, 3, 4 },
{ "submit", &RPCParser::parseSignSubmit, 1, 3 },
{ "submit_multisigned", &RPCParser::parseSubmitMultiSigned, 1, 1 },
{ "server_info", &RPCParser::parseAsIs, 0, 0 },
{ "server_state", &RPCParser::parseAsIs, 0, 0 },
{ "server_info", &RPCParser::parseServerInfo, 0, 1 },
{ "server_state", &RPCParser::parseServerInfo, 0, 1 },
{ "stop", &RPCParser::parseAsIs, 0, 0 },
{ "transaction_entry", &RPCParser::parseTransactionEntry, 2, 2 },
{ "tx", &RPCParser::parseTx, 1, 2 },


@@ -126,10 +126,12 @@ JSS ( consensus ); // out: NetworkOPs, LedgerConsensus
JSS ( converge_time ); // out: NetworkOPs
JSS ( converge_time_s ); // out: NetworkOPs
JSS ( count ); // in: AccountTx*, ValidatorList
JSS ( counters ); // in/out: retrieve counters
JSS ( currency ); // in: paths/PathRequest, STAmount
// out: paths/Node, STPathSet, STAmount,
// AccountLines
JSS ( current ); // out: OwnerInfo
JSS ( current_activities );
JSS ( current_ledger_size ); // out: TxQ
JSS ( current_queue_size ); // out: TxQ
JSS ( data ); // out: LedgerData
@@ -158,6 +160,7 @@ JSS ( engine_result ); // out: NetworkOPs, TransactionSign, Submit
JSS ( engine_result_code ); // out: NetworkOPs, TransactionSign, Submit
JSS ( engine_result_message ); // out: NetworkOPs, TransactionSign, Submit
JSS ( error ); // out: error
JSS ( errored );
JSS ( error_code ); // out: error
JSS ( error_exception ); // out: Submit
JSS ( error_message ); // out: error
@@ -178,6 +181,7 @@ JSS ( fee_mult_max ); // in: TransactionSign
JSS ( fee_ref ); // out: NetworkOPs
JSS ( fetch_pack ); // out: NetworkOPs
JSS ( first ); // out: rpc/Version
JSS ( finished );
JSS ( fix_txns ); // in: LedgerCleaner
JSS ( flags ); // out: paths/Node, AccountOffers,
// NetworkOPs
@@ -215,6 +219,9 @@ JSS ( ip ); // in: Connect, out: OverlayImpl
JSS ( issuer ); // in: RipplePathFind, Subscribe,
// Unsubscribe, BookOffers
// out: paths/Node, STPathSet, STAmount
JSS ( job );
JSS ( job_queue );
JSS ( jobs );
JSS ( jsonrpc ); // json version
JSS ( jq_trans_overflow ); // JobQueue transaction limit overflow.
JSS ( key ); // out
@@ -286,6 +293,7 @@ JSS ( meta ); // out: NetworkOPs, AccountTx*, Tx
JSS ( metaData );
JSS ( metadata ); // out: TransactionEntry
JSS ( method ); // RPC
JSS ( methods );
JSS ( min_count ); // in: GetCounts
JSS ( min_ledger ); // in: LedgerCleaner
JSS ( minimum_fee ); // out: TxQ
@@ -351,6 +359,8 @@ JSS ( quality_in ); // out: AccountLines
JSS ( quality_out ); // out: AccountLines
JSS ( queue ); // in: AccountInfo
JSS ( queue_data ); // out: AccountInfo
JSS ( queued );
JSS ( queued_duration_us );
JSS ( random ); // out: Random
JSS ( raw_meta ); // out: AcceptedLedgerTx
JSS ( receive_currencies ); // out: AccountCurrencies
@@ -369,7 +379,9 @@ JSS ( ripple_lines ); // out: NetworkOPs
JSS ( ripple_state ); // in: LedgerEntr
JSS ( ripplerpc ); // ripple RPC version
JSS ( role ); // out: Ping.cpp
JSS ( rpc );
JSS ( rt_accounts ); // in: Subscribe, Unsubscribe
JSS ( running_duration_us );
JSS ( sanity ); // out: PeerImp
JSS ( search_depth ); // in: RipplePathFind
JSS ( secret ); // in: TransactionSign,
@@ -402,6 +414,7 @@ JSS ( source_currencies ); // in: PathRequest, RipplePathFind
JSS ( source_tag ); // out: AccountChannels
JSS ( stand_alone ); // out: NetworkOPs
JSS ( start ); // in: TxHistory
JSS ( started );
JSS ( state ); // out: Logic.h, ServerState, LedgerData
JSS ( state_accounting ); // out: NetworkOPs
JSS ( state_now ); // in: Subscribe
@@ -422,8 +435,10 @@ JSS ( taker_pays ); // in: Subscribe, Unsubscribe, BookOffers
JSS ( taker_pays_funded ); // out: NetworkOPs
JSS ( threshold ); // in: Blacklist
JSS ( ticket ); // in: AccountObjects
JSS ( time );
JSS ( timeouts ); // out: InboundLedger
JSS ( traffic ); // out: Overlay
JSS ( total ); // out: counters
JSS ( totalCoins ); // out: LedgerToJson
JSS ( total_coins ); // out: LedgerToJson
JSS ( transTreeHash ); // out: ledger/Ledger.cpp
@@ -477,6 +492,7 @@ JSS ( version ); // out: RPCVersion
JSS ( vetoed ); // out: AmendmentTableImpl
JSS ( vote ); // in: Feature
JSS ( warning ); // rpc:
JSS ( workers );
JSS ( write_load ); // out: GetCounts
#undef JSS

View File

@@ -44,8 +44,7 @@ setup_ServerHandler (
std::unique_ptr <ServerHandler>
make_ServerHandler (Application& app, Stoppable& parent, boost::asio::io_service&,
JobQueue&, NetworkOPs&, Resource::Manager&,
CollectorManager& cm);
JobQueue&, NetworkOPs&, Resource::Manager&, CollectorManager& cm);
} // ripple

View File

@@ -19,12 +19,14 @@
#include <ripple/app/main/Application.h>
#include <ripple/basics/Log.h>
#include <ripple/basics/PerfLog.h>
#include <ripple/rpc/impl/Handler.h>
namespace ripple {
Json::Value doLogRotate (RPC::Context& context)
{
context.app.getPerfLog().rotate();
return RPC::makeObjectValue (context.app.logs().rotate());
}

View File

@@ -32,7 +32,9 @@ Json::Value doServerInfo (RPC::Context& context)
Json::Value ret (Json::objectValue);
ret[jss::info] = context.netOps.getServerInfo (
true, context.role == Role::ADMIN);
true, context.role == Role::ADMIN,
context.params.isMember(jss::counters) &&
context.params[jss::counters].asBool());
return ret;
}

View File

@@ -32,7 +32,9 @@ Json::Value doServerState (RPC::Context& context)
Json::Value ret (Json::objectValue);
ret[jss::state] = context.netOps.getServerInfo (
false, context.role == Role::ADMIN);
false, context.role == Role::ADMIN,
context.params.isMember(jss::counters) &&
context.params[jss::counters].asBool());
return ret;
}
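
Both handlers pass the same optional flag through to getServerInfo. As a usage sketch (the request/field layout below is assumed, standard JSON-RPC style, and is not taken from this commit; only jss::counters is from the diff), an admin client opts in like this:

    // Illustrative construction of a server_info request that asks for the
    // Job Queue and RPC counters; overall field layout assumed.
    Json::Value request (Json::objectValue);
    request["method"] = "server_info";
    Json::Value params (Json::objectValue);
    params[jss::counters] = true;         // triggers the extra counters section
    request["params"] = Json::arrayValue;
    request["params"].append (params);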

View File

@@ -55,48 +55,7 @@ Status handle (Context& context, Object& object)
return status;
};
class HandlerTable {
public:
template<std::size_t N>
explicit
HandlerTable (const Handler(&entries)[N])
{
for (std::size_t i = 0; i < N; ++i)
{
auto const& entry = entries[i];
assert (table_.find(entry.name_) == table_.end());
table_[entry.name_] = entry;
}
// This is where the new-style handlers are added.
addHandler<LedgerHandler>();
addHandler<VersionHandler>();
}
const Handler* getHandler(std::string name) const {
auto i = table_.find(name);
return i == table_.end() ? nullptr : &i->second;
}
private:
std::map<std::string, Handler> table_;
template <class HandlerImpl>
void addHandler()
{
assert (table_.find(HandlerImpl::name()) == table_.end());
Handler h;
h.name_ = HandlerImpl::name();
h.valueMethod_ = &handle<Json::Value, HandlerImpl>;
h.role_ = HandlerImpl::role();
h.condition_ = HandlerImpl::condition();
table_[HandlerImpl::name()] = h;
};
};
Handler handlerArray[] {
Handler const handlerArray[] {
// Some handlers not specified here are added to the table via addHandler()
// Request-response methods
{ "account_info", byRef (&doAccountInfo), Role::USER, NO_CONDITION },
@@ -159,12 +118,77 @@ Handler handlerArray[] {
{ "unsubscribe", byRef (&doUnsubscribe), Role::USER, NO_CONDITION },
};
class HandlerTable {
private:
template<std::size_t N>
explicit
HandlerTable (const Handler(&entries)[N])
{
for (std::size_t i = 0; i < N; ++i)
{
auto const& entry = entries[i];
assert (table_.find(entry.name_) == table_.end());
table_[entry.name_] = entry;
}
// This is where the new-style handlers are added.
addHandler<LedgerHandler>();
addHandler<VersionHandler>();
}
public:
static HandlerTable const& instance()
{
static HandlerTable const handlerTable (handlerArray);
return handlerTable;
}
Handler const* getHandler(std::string name) const
{
auto i = table_.find(name);
return i == table_.end() ? nullptr : &i->second;
}
std::vector<char const*>
getHandlerNames() const
{
std::vector<char const*> ret;
ret.reserve(table_.size());
for (auto const& i : table_)
ret.push_back(i.second.name_);
return ret;
}
private:
std::map<std::string, Handler> table_;
template <class HandlerImpl>
void addHandler()
{
assert (table_.find(HandlerImpl::name()) == table_.end());
Handler h;
h.name_ = HandlerImpl::name();
h.valueMethod_ = &handle<Json::Value, HandlerImpl>;
h.role_ = HandlerImpl::role();
h.condition_ = HandlerImpl::condition();
table_[HandlerImpl::name()] = h;
}
};
} // namespace
const Handler* getHandler(std::string const& name) {
static HandlerTable const handlers(handlerArray);
return handlers.getHandler(name);
Handler const* getHandler(std::string const& name)
{
return HandlerTable::instance().getHandler(name);
}
std::vector<char const*>
getHandlerNames()
{
return HandlerTable::instance().getHandlerNames();
};
} // RPC
} // ripple
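
A short usage sketch for the new getHandlerNames() accessor (the loop itself is illustrative, not part of this commit): it lets callers such as the performance logger pre-populate one counter slot per RPC method before any call arrives.

    // Illustrative only: seed a per-method counter table from the handler
    // names (needs <map>, <cstdint>, <string>).
    std::map<std::string, std::uint64_t> rpcCallCounts;
    for (char const* name : ripple::RPC::getHandlerNames())
        rpcCallCounts.emplace (name, 0);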

View File

@@ -23,6 +23,7 @@
#include <ripple/core/Config.h>
#include <ripple/rpc/RPCHandler.h>
#include <ripple/rpc/Status.h>
#include <vector>
namespace Json {
class Object;
@@ -50,7 +51,7 @@ struct Handler
RPC::Condition condition_;
};
const Handler* getHandler (std::string const&);
Handler const* getHandler (std::string const&);
/** Return a Json::objectValue with a single entry. */
template <class Value>
@@ -62,6 +63,9 @@ Json::Value makeObjectValue (
return result;
}
/** Return names of all methods. */
std::vector<char const*> getHandlerNames();
} // RPC
} // ripple

View File

@@ -26,6 +26,7 @@
#include <ripple/app/misc/NetworkOPs.h>
#include <ripple/basics/contract.h>
#include <ripple/basics/Log.h>
#include <ripple/basics/PerfLog.h>
#include <ripple/core/Config.h>
#include <ripple/core/JobQueue.h>
#include <ripple/json/Object.h>
@@ -36,6 +37,8 @@
#include <ripple/resource/Fees.h>
#include <ripple/rpc/Role.h>
#include <ripple/resource/Fees.h>
#include <atomic>
#include <chrono>
namespace ripple {
namespace RPC {
@@ -197,14 +200,22 @@ template <class Object, class Method>
Status callMethod (
Context& context, Method method, std::string const& name, Object& result)
{
static std::atomic<std::uint64_t> requestId {0};
auto& perfLog = context.app.getPerfLog();
std::uint64_t const curId = ++requestId;
try
{
perfLog.rpcStart(name, curId);
auto v = context.app.getJobQueue().makeLoadEvent(
jtGENERIC, "cmd:" + name);
return method (context, result);
auto ret = method (context, result);
perfLog.rpcFinish(name, curId);
return ret;
}
catch (std::exception& e)
{
perfLog.rpcError(name, curId);
JLOG (context.j.info()) << "Caught throw: " << e.what ();
if (context.loadType == Resource::feeReferenceRPC)
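
callMethod brackets every RPC dispatch with rpcStart, then rpcFinish on success or rpcError when the method throws. The same pattern can be expressed as an RAII guard; the class below is purely illustrative and is not part of this commit — only the three PerfLog member functions it calls come from the diff.

    // Hypothetical RAII helper mirroring the rpcStart/rpcFinish/rpcError
    // bracket in callMethod; the destructor reports an error if the guard
    // is unwound (e.g. by an exception) before finish() was called.
    // Needs <cstdint>, <string>, <utility>, <ripple/basics/PerfLog.h>.
    class RpcPerfGuard
    {
    public:
        RpcPerfGuard (perf::PerfLog& perfLog, std::string name, std::uint64_t id)
            : perfLog_ (perfLog), name_ (std::move (name)), id_ (id)
        {
            perfLog_.rpcStart (name_, id_);
        }

        void finish()
        {
            perfLog_.rpcFinish (name_, id_);
            finished_ = true;
        }

        ~RpcPerfGuard()
        {
            if (! finished_)
                perfLog_.rpcError (name_, id_);
        }

    private:
        perf::PerfLog& perfLog_;
        std::string name_;
        std::uint64_t id_;
        bool finished_ = false;
    };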

View File

@@ -25,11 +25,11 @@
#include <ripple/basics/impl/Log.cpp>
#include <ripple/basics/impl/make_SSLContext.cpp>
#include <ripple/basics/impl/mulDiv.cpp>
#include <ripple/basics/impl/PerfLogImp.cpp>
#include <ripple/basics/impl/ResolverAsio.cpp>
#include <ripple/basics/impl/strHex.cpp>
#include <ripple/basics/impl/StringUtilities.cpp>
#include <ripple/basics/impl/Sustain.cpp>
#include <ripple/basics/impl/Time.cpp>
#include <ripple/basics/impl/UptimeTimer.cpp>
#if DOXYGEN

File diff suppressed because it is too large.

View File

@@ -40,7 +40,7 @@ class ByzantineFailureSim_test : public beast::unit_test::suite
ConsensusParms const parms{};
SimDuration const delay =
round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
PeerGroup a = sim.createGroup(1);
PeerGroup b = sim.createGroup(1);
PeerGroup c = sim.createGroup(1);

View File

@@ -146,7 +146,7 @@ public:
// Connected trust and network graphs with single fixed delay
peers.trustAndConnect(
peers, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
peers, date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
// everyone submits their own ID as a TX
for (Peer * p : peers)
@@ -194,10 +194,11 @@ public:
// Fast and slow network connections
fast.connect(
fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
fast, date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
slow.connect(
network, round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
network,
date::round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
// All peers submit their own ID as a transaction
for (Peer* peer : network)
@@ -250,10 +251,12 @@ public:
// Fast and slow network connections
fast.connect(
fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
fast,
date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
slow.connect(
network, round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
network,
date::round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
for (Peer* peer : slow)
peer->runAsValidator = isParticipant;
@@ -377,7 +380,7 @@ public:
network.trust(network);
network.connect(
network, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
network, date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
// Run consensus without skew until we have a short close time
// resolution
@@ -447,7 +450,7 @@ public:
PeerGroup network = minority + majority;
SimDuration delay =
round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
minority.trustAndConnect(minority + majorityA, delay);
majority.trustAndConnect(majority, delay);
@@ -552,7 +555,8 @@ public:
PeerGroup network = loner + clique;
network.connect(
network, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
network,
date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
// initial round to set prior state
sim.run(1);
@@ -606,9 +610,10 @@ public:
// Fast and slow network connections
fast.connect(
fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
fast, date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
slow.connect(
network, round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
network,
date::round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
// Run to the ledger *prior* to decreasing the resolution
sim.run(increaseLedgerTimeResolutionEvery - 2);
@@ -757,7 +762,7 @@ public:
PeerGroup network = a + b;
SimDuration delay =
round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
a.trustAndConnect(a, delay);
b.trustAndConnect(b, delay);
@@ -804,7 +809,7 @@ public:
center.trust(validators);
SimDuration delay =
round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
validators.connect(center, delay);
center[0]->runAsValidator = false;
@@ -928,8 +933,10 @@ public:
PeerGroup groupNotFastC = groupABD + groupCsplit;
PeerGroup network = groupABD + groupCsplit + groupCfast;
SimDuration delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
SimDuration fDelay = round<milliseconds>(0.1 * parms.ledgerGRANULARITY);
SimDuration delay = date::round<milliseconds>(
0.2 * parms.ledgerGRANULARITY);
SimDuration fDelay = date::round<milliseconds>(
0.1 * parms.ledgerGRANULARITY);
network.trust(network);
// C must have a shorter delay to see all the validations before the

View File

@@ -57,7 +57,7 @@ class ScaleFreeSim_test : public beast::unit_test::suite
// nodes with a trust line in either direction are network-connected
network.connectFromTrust(
round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
date::round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
// Initialize collectors to track statistics to report
TxCollector txCollector;

View File

@@ -19,9 +19,66 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#include <ripple/core/impl/Workers.h>
#include <ripple/beast/unit_test.h>
#include <ripple/basics/PerfLog.h>
#include <ripple/core/JobTypes.h>
#include <ripple/json/json_value.h>
#include <chrono>
#include <cstdint>
#include <memory>
#include <string>
namespace ripple {
/**
* Dummy class for unit tests.
*/
namespace perf {
class PerfLogTest
: public PerfLog
{
void rpcStart(std::string const &method, std::uint64_t requestId) override
{}
void rpcFinish(std::string const &method, std::uint64_t requestId) override
{}
void rpcError(std::string const &method, std::uint64_t dur) override
{}
void jobQueue(JobType const type) override
{}
void jobStart(JobType const type,
std::chrono::microseconds dur,
std::chrono::time_point<std::chrono::steady_clock> startTime,
int instance) override
{}
void jobFinish(JobType const type, std::chrono::microseconds dur,
int instance) override
{}
Json::Value countersJson() const override
{
return Json::Value();
}
Json::Value currentJson() const override
{
return Json::Value();
}
void resizeJobs(int const resize) override
{}
void rotate() override
{}
};
} // perf
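
Besides the RPC hooks, the stub spells out the Job Queue side of the interface: a task is tallied when it is queued, when it starts running, and when it finishes. The call sequence below is illustrative with invented durations; the interpretation that jobStart receives the time spent queued and jobFinish the time spent running is an assumption, consistent with the queued_duration_us and running_duration_us fields added to jss.h.

    // Illustrative Job Queue instrumentation sequence (values invented);
    // assumes an Application& app in scope, plus <chrono> and JobTypes.h.
    // instance identifies the worker-thread slot that ran the job.
    perf::PerfLog& perfLog = app.getPerfLog();
    perfLog.jobQueue (jtGENERIC);                   // task enters the queue
    perfLog.jobStart (jtGENERIC,
        std::chrono::microseconds (150),            // time spent queued
        std::chrono::steady_clock::now(),           // running start time
        0 /* instance */);
    perfLog.jobFinish (jtGENERIC,
        std::chrono::microseconds (2500),           // time spent running
        0 /* instance */);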
//------------------------------------------------------------------------------
class Workers_test : public beast::unit_test::suite
@@ -35,7 +92,7 @@ public:
{
}
void processTask()
void processTask(int instance) override
{
if (--count == 0)
finished.signal();
@@ -51,8 +108,10 @@ public:
" -> " + std::to_string(tc2) + " -> " + std::to_string(tc3));
TestCallback cb;
std::unique_ptr<perf::PerfLog> perfLog =
std::make_unique<perf::PerfLogTest>();
Workers w(cb, "Test", tc1);
Workers w(cb, *perfLog, "Test", tc1);
BEAST_EXPECT(w.getNumberOfThreads() == tc1);
auto testForThreadCount = [this, &cb, &w] (int const threadCount)

View File

@@ -104,7 +104,7 @@ pretty_time(std::ostream& os, std::chrono::duration<Rep, Period> d)
else
{
// use integral
os << round<nanoseconds>(d).count();
os << date::round<nanoseconds>(d).count();
}
os << "ns";
}
@@ -120,7 +120,7 @@ pretty_time(std::ostream& os, std::chrono::duration<Rep, Period> d)
else
{
// use integral
os << round<microseconds>(d).count();
os << date::round<microseconds>(d).count();
}
os << "us";
}
@@ -136,7 +136,7 @@ pretty_time(std::ostream& os, std::chrono::duration<Rep, Period> d)
else
{
// use integral
os << round<milliseconds>(d).count();
os << date::round<milliseconds>(d).count();
}
os << "ms";
}
@@ -152,7 +152,7 @@ pretty_time(std::ostream& os, std::chrono::duration<Rep, Period> d)
else
{
// use integral
os << round<seconds>(d).count();
os << date::round<seconds>(d).count();
}
os << "s";
}
@@ -168,7 +168,7 @@ pretty_time(std::ostream& os, std::chrono::duration<Rep, Period> d)
else
{
// use integral
os << round<minutes>(d).count();
os << date::round<minutes>(d).count();
}
os << "min";
}
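
These call sites now qualify round explicitly, presumably so the call stays unambiguous when both date::round and std::chrono::round (C++17) are visible; that motivation is inferred, not stated in the commit. A minimal standalone illustration of the qualified call (assuming Howard Hinnant's date library is available as date/date.h, which may differ from the path this project uses):

    #include <chrono>
    #include <date/date.h>        // provides date::round

    int main()
    {
        using namespace std::chrono;
        auto const d = microseconds (1234567);
        auto const ms = date::round<milliseconds> (d);  // 1235ms (round to nearest)
        return ms.count() == 1235 ? 0 : 1;
    }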

View File

@@ -26,6 +26,7 @@
#include <test/basics/hardened_hash_test.cpp>
#include <test/basics/KeyCache_test.cpp>
#include <test/basics/mulDiv_test.cpp>
#include <test/basics/PerfLog_test.cpp>
#include <test/basics/RangeSet_test.cpp>
#include <test/basics/Slice_test.cpp>
#include <test/basics/StringUtilities_test.cpp>