Travis CI improvements:

FIXES: #2527

* define custom docker image for travis-linux builds based on
  package build image
* add macos builds
* add windows builds (currently allowed to fail)
* improve build and shell scripts as required for the CI envs
* add asio timer latency workaround
* omit several manual tests from Travis CI that cause memory exhaustion
This commit is contained in:
Mike Ellery
2019-05-20 10:53:24 -07:00
committed by Nik Bougalis
parent 87e9ee5ce9
commit 13a4fefe34
18 changed files with 1300 additions and 591 deletions

View File

@@ -19,8 +19,17 @@
#include <ripple/beast/asio/io_latency_probe.h>
#include <ripple/beast/unit_test.h>
#include <beast/test/yield_to.hpp>
#include <boost/asio/basic_waitable_timer.hpp>
#include <boost/asio/deadline_timer.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/optional.hpp>
#include <algorithm>
#include <chrono>
#include <future>
#include <mutex>
#include <numeric>
#include <thread>
#include <vector>
using namespace std::chrono_literals;
@@ -29,6 +38,81 @@ class io_latency_probe_test :
{
using MyTimer = boost::asio::basic_waitable_timer<std::chrono::steady_clock>;
#ifdef RIPPLED_RUNNING_IN_CI
/**
 * @brief attempt to measure inaccuracy of asio waitable timers
 *
 * This class is needed in some VM/CI environments where
 * timer inaccuracy impacts the io_probe tests below.
 *
 * @tparam Clock clock used to drive the waitable timer under test
 * @tparam MeasureClock clock used to measure actual elapsed wall time
 */
template <class Clock, class MeasureClock = std::chrono::high_resolution_clock>
struct measure_asio_timers
{
    using duration = typename Clock::duration;
    using rep = typename MeasureClock::duration::rep;

    // one measured elapsed time per timer wait
    std::vector<duration> elapsed_times_;

    /** Run num_samples timer waits of the requested interval, recording
        the measured elapsed time of each.

        @param interval requested timer expiry interval
        @param num_samples number of timer waits to sample
        @throws boost::system::system_error if any wait reports an error
    */
    measure_asio_timers (duration interval = 100ms, size_t num_samples = 50)
    {
        using namespace std::chrono;
        boost::asio::io_service ios;
        // keep the service alive until all samples are collected
        boost::optional<boost::asio::io_service::work> work {ios};
        std::thread worker { [&]{ ios.run(); } };
        boost::asio::basic_waitable_timer<Clock> timer {ios};
        elapsed_times_.reserve (num_samples);
        boost::system::error_code wait_err;
        // BUGFIX: the original `while (--num_samples)` collected only
        // num_samples - 1 samples despite reserving num_samples;
        // post-decrement collects exactly num_samples.
        while (num_samples--)
        {
            // BUGFIX: the original locked a std::mutex on this thread
            // and unlocked it inside the handler on the worker thread;
            // unlocking a mutex from a non-owning thread is undefined
            // behavior. A promise/future pair is a safe per-iteration
            // rendezvous with the handler thread.
            std::promise<void> done;
            auto const start {MeasureClock::now()};
            timer.expires_after (interval);
            timer.async_wait ( [&] (boost::system::error_code const& ec) {
                if (ec)
                    wait_err = ec;
                elapsed_times_.emplace_back (MeasureClock::now() - start);
                done.set_value ();
            });
            // block until the handler has recorded this sample
            done.get_future().wait ();
        }
        work = boost::none;
        worker.join();
        if (wait_err)
            boost::asio::detail::throw_error(wait_err, "wait");
    }

    /** @return mean of the sampled elapsed times as a double count of
        duration type D. NaN if no samples were collected. */
    template <class D>
    auto getMean()
    {
        double sum = {0};
        for (auto const& v : elapsed_times_)
        {
            sum += static_cast<double>(
                std::chrono::duration_cast<D>(v).count());
        }
        return sum / elapsed_times_.size();
    }

    /** @return largest sampled elapsed time as a count of D.
        Requires at least one sample (num_samples > 0). */
    template <class D>
    auto getMax()
    {
        return std::chrono::duration_cast<D>(*std::max_element(
            elapsed_times_.begin(), elapsed_times_.end())).count();
    }

    /** @return smallest sampled elapsed time as a count of D.
        Requires at least one sample (num_samples > 0). */
    template <class D>
    auto getMin()
    {
        return std::chrono::duration_cast<D>(*std::min_element(
            elapsed_times_.begin(), elapsed_times_.end())).count();
    }
};
#endif
struct test_sampler
{
beast::io_latency_probe <std::chrono::steady_clock> probe_;
@@ -79,14 +163,38 @@ class io_latency_probe_test :
{
testcase << "sample ongoing";
boost::system::error_code ec;
test_sampler io_probe {99ms, get_io_service()};
using namespace std::chrono;
auto interval = 99ms;
auto probe_duration = 1s;
size_t expected_probe_count_max = (probe_duration/interval);
size_t expected_probe_count_min = expected_probe_count_max;
#ifdef RIPPLED_RUNNING_IN_CI
// adjust min expected based on measurements
// if running in CI/VM environment
measure_asio_timers<steady_clock> tt {interval};
log << "measured mean for timers: "
<< tt.getMean<milliseconds>() << "ms\n";
log << "measured max for timers: "
<< tt.getMax<milliseconds>() << "ms\n";
expected_probe_count_min =
static_cast<size_t>(
duration_cast<milliseconds>(probe_duration).count())
/ static_cast<size_t>(tt.getMean<milliseconds>());
#endif
log << "expected_probe_count_min: " << expected_probe_count_min << "\n";
log << "expected_probe_count_max: " << expected_probe_count_max << "\n";
test_sampler io_probe {interval, get_io_service()};
io_probe.start();
MyTimer timer {get_io_service(), 1s};
MyTimer timer {get_io_service(), probe_duration};
timer.async_wait(yield[ec]);
if(! BEAST_EXPECTS(! ec, ec.message()))
return;
auto probes_seen = io_probe.durations_.size();
BEAST_EXPECTS(probes_seen >=9 && probes_seen <= 11,
BEAST_EXPECTS(
probes_seen >= (expected_probe_count_min - 1) &&
probes_seen <= (expected_probe_count_max + 1),
std::string("probe count is ") + std::to_string(probes_seen));
io_probe.probe_.cancel_async();
// wait again in order to flush the remaining