mirror of https://github.com/XRPLF/rippled.git

Merge branch 'master' of github.com:jedmccaleb/NewCoin
@@ -122,6 +122,7 @@
    <ClCompile Include="src\cpp\ripple\LedgerMaster.cpp" />
    <ClCompile Include="src\cpp\ripple\LedgerProposal.cpp" />
    <ClCompile Include="src\cpp\ripple\LedgerTiming.cpp" />
    <ClCompile Include="src\cpp\ripple\LoadMonitor.cpp" />
    <ClCompile Include="src\cpp\ripple\Log.cpp" />
    <ClCompile Include="src\cpp\ripple\main.cpp" />
    <ClCompile Include="src\cpp\ripple\NetworkOPs.cpp" />
@@ -345,6 +345,9 @@
    <ClCompile Include="src\cpp\ripple\WalletAddTransactor.cpp">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="src\cpp\ripple\LoadMonitor.cpp">
      <Filter>Source Files</Filter>
    </ClCompile>
  </ItemGroup>
  <ItemGroup>
    <ClInclude Include="util\pugiconfig.hpp">
@@ -60,6 +60,7 @@ void HashedObjectStore::waitWrite()

void HashedObjectStore::bulkWrite()
{
    LoadEvent::pointer event = theApp->getJobQueue().getLoadEvent(jtDISK);
    while (1)
    {
        std::vector< boost::shared_ptr<HashedObject> > set;
@@ -1,6 +1,5 @@
#include "JobQueue.h"

#include <boost/make_shared.hpp>
#include <boost/foreach.hpp>
#include <boost/bind.hpp>
#include <boost/thread.hpp>
@@ -21,6 +20,9 @@ const char* Job::toString(JobType t)
        case jtPROPOSAL_t: return "trustedProposal";
        case jtADMIN: return "administration";
        case jtDEATH: return "jobOfDeath";
        case jtCLIENT: return "clientCommand";
        case jtPEER: return "peerCommand";
        case jtDISK: return "diskAccess";
        default: assert(false); return "unknown";
    }
}
@@ -68,7 +70,7 @@ void JobQueue::addJob(JobType type, const boost::function<void(Job&)>& jobFunc)
    boost::mutex::scoped_lock sl(mJobLock);
    assert(mThreadCount != 0); // do not add jobs to a queue with no threads

    mJobSet.insert(Job(type, ++mLastJob, jobFunc));
    mJobSet.insert(Job(type, ++mLastJob, mJobLoads[type], jobFunc));
    ++mJobCounts[type];
    mJobCond.notify_one();
}
@@ -108,6 +110,43 @@ std::vector< std::pair<JobType, int> > JobQueue::getJobCounts()
    return ret;
}

Json::Value JobQueue::getJson(int)
{
    Json::Value ret(Json::objectValue);
    boost::mutex::scoped_lock sl(mJobLock);

    ret["threads"] = mThreadCount;

    Json::Value priorities = Json::arrayValue;
    for (int i = 0; i < NUM_JOB_TYPES; ++i)
    {
        uint64 count, latencyAvg, latencyPeak, jobCount;
        mJobLoads[i].getCountAndLatency(count, latencyAvg, latencyPeak);
        std::map<JobType, int>::iterator it = mJobCounts.find(static_cast<JobType>(i));
        if (it == mJobCounts.end())
            jobCount = 0;
        else
            jobCount = it->second;
        if ((count != 0) || (jobCount != 0) || (latencyPeak != 0))
        {
            Json::Value pri(Json::objectValue);
            pri["job_type"] = Job::toString(static_cast<JobType>(i));
            if (jobCount != 0)
                pri["waiting"] = static_cast<int>(jobCount);
            if (count != 0)
                pri["per_second"] = static_cast<int>(count);
            if (latencyPeak != 0)
                pri["peak_latency"] = static_cast<int>(latencyPeak);
            if (latencyAvg != 0)
                pri["avg_latency"] = static_cast<int>(latencyAvg);
            priorities.append(pri);
        }
    }
    ret["job_types"] = priorities;

    return ret;
}

void JobQueue::shutdown()
{ // shut down the job queue without completing pending jobs
    cLog(lsINFO) << "Job queue shutting down";
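For orientation, here is a standalone sketch (not part of the commit) of the shape getJson() produces, built with stock jsoncpp rather than the tree's bundled copy. The "trustedProposal" string comes from Job::toString above; the numeric values are invented, and only non-zero fields are emitted by the real code.

// Minimal illustration of the per-type load report getJson() assembles.
#include <iostream>
#include <json/json.h>   // jsoncpp; the include path may differ per install

int main()
{
    Json::Value ret(Json::objectValue);
    ret["threads"] = 4;                       // mThreadCount

    Json::Value pri(Json::objectValue);
    pri["job_type"] = "trustedProposal";      // Job::toString(jtPROPOSAL_t)
    pri["waiting"] = 2;                       // jobs of this type still queued
    pri["per_second"] = 15;                   // decayed count / 4
    pri["peak_latency"] = 120;                // milliseconds
    pri["avg_latency"] = 30;                  // milliseconds

    Json::Value priorities(Json::arrayValue);
    priorities.append(pri);
    ret["job_types"] = priorities;

    std::cout << Json::StyledWriter().write(ret) << std::endl;
    return 0;
}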
@@ -8,26 +8,36 @@
#include <boost/thread/mutex.hpp>
#include <boost/thread/condition_variable.hpp>
#include <boost/function.hpp>
#include <boost/make_shared.hpp>

#include "../json/value.h"

#include "types.h"
#include "LoadMonitor.h"

// Note that this queue should only be used for CPU-bound jobs
// It is primarily intended for signature checking

enum JobType
{ // must be in priority order, low to high
    jtINVALID,
    jtVALIDATION_ut, // A validation from an untrusted source
    jtCLIENTOP_ut, // A client operation from a non-local/untrusted source
    jtTRANSACTION, // A transaction received from the network
    jtPROPOSAL_ut, // A proposal from an untrusted source
    jtCLIENTOP_t, // A client operation from a trusted source
    jtVALIDATION_t, // A validation from a trusted source
    jtTRANSACTION_l, // A local transaction
    jtPROPOSAL_t, // A proposal from a trusted source
    jtADMIN, // An administrative operation
    jtDEATH, // job of death, used internally
    jtINVALID = -1,
    jtVALIDATION_ut = 0, // A validation from an untrusted source
    jtCLIENTOP_ut = 1, // A client operation from a non-local/untrusted source
    jtTRANSACTION = 2, // A transaction received from the network
    jtPROPOSAL_ut = 3, // A proposal from an untrusted source
    jtCLIENTOP_t = 4, // A client operation from a trusted source
    jtVALIDATION_t = 5, // A validation from a trusted source
    jtTRANSACTION_l = 6, // A local transaction
    jtPROPOSAL_t = 7, // A proposal from a trusted source
    jtADMIN = 8, // An administrative operation
    jtDEATH = 9, // job of death, used internally

    // special types not dispatched by the job pool
    jtCLIENT = 10,
    jtPEER = 11,
    jtDISK = 12,
};
#define NUM_JOB_TYPES 16

class Job
{
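The enum comment above says the types "must be in priority order, low to high", and JobQueue keeps its pending work in a std::set<Job>. Job's comparison operator is not part of this diff, so the ordering used below is an assumption, but it illustrates why the numeric order matters: comparing by (type, index) lets a worker pull the highest-priority job from one end of the set.

// Self-contained sketch of a priority-ordered job set (assumed comparison, not the commit's code).
#include <cstdint>
#include <iostream>
#include <set>

enum JobType { jtVALIDATION_ut = 0, jtTRANSACTION = 2, jtPROPOSAL_t = 7 };   // values from the diff

struct Job
{
    JobType type;
    std::uint64_t index;    // assigned from ++mLastJob in addJob()

    bool operator<(const Job& other) const
    {   // hypothetical ordering: priority first, then arrival order
        if (type != other.type)
            return type < other.type;
        return index < other.index;
    }
};

int main()
{
    std::set<Job> jobSet;
    jobSet.insert({jtTRANSACTION, 1});
    jobSet.insert({jtPROPOSAL_t, 2});
    jobSet.insert({jtVALIDATION_ut, 3});

    // A worker thread would take work from the high end: the trusted proposal goes first.
    Job next = *jobSet.rbegin();
    std::cout << "next job type = " << next.type << "\n";   // prints 7
    return 0;
}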
@@ -35,13 +45,18 @@ protected:
    JobType mType;
    uint64 mJobIndex;
    boost::function<void(Job&)> mJob;
    LoadEvent::pointer mLoadMonitor;

public:
    Job() : mType(jtINVALID), mJobIndex(0) { ; }
    Job(JobType type, uint64 index) : mType(type), mJobIndex(index) { ; }

    Job(JobType type, uint64 index, const boost::function<void(Job&)>& job)
        : mType(type), mJobIndex(index), mJob(job) { ; }
    Job() : mType(jtINVALID), mJobIndex(0) { ; }

    Job(JobType type, uint64 index) : mType(type), mJobIndex(index)
    { ; }

    Job(JobType type, uint64 index, LoadMonitor& lm, const boost::function<void(Job&)>& job)
        : mType(type), mJobIndex(index), mJob(job)
    { mLoadMonitor = boost::make_shared<LoadEvent>(boost::ref(lm), true, 1); }

    JobType getType() const { return mType; }
    void doJob(void) { mJob(*this); }
@@ -63,6 +78,7 @@ protected:
    uint64 mLastJob;
    std::set<Job> mJobSet;
    std::map<JobType, int> mJobCounts;
    LoadMonitor mJobLoads[NUM_JOB_TYPES];
    int mThreadCount;
    bool mShuttingDown;
@@ -81,6 +97,11 @@ public:

    void shutdown();
    void setThreadCount(int c = 0);

    LoadEvent::pointer getLoadEvent(JobType t)
    { return boost::make_shared<LoadEvent>(boost::ref(mJobLoads[t]), true, 1); }

    Json::Value getJson(int c = 0);
};

#endif
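getLoadEvent() hands call sites (bulkWrite above, and saveAcceptedLedger, processReadBuffer, doWrite and on_message below) a shared_ptr<LoadEvent> that they simply hold for the duration of the work. LoadEvent's constructor and destructor are not shown in this diff, so the sketch below is an assumption about the intent: construction starts a timer, destruction reports one count plus the elapsed milliseconds to the owning monitor. The bool/int constructor arguments of the real LoadEvent are not modelled here.

// Self-contained sketch of the presumed scope-timer pattern behind LoadEvent.
#include <chrono>
#include <iostream>

struct Monitor                  // stand-in for LoadMonitor
{
    void addCountAndLatency(int counts, int latencyMS)
    { std::cout << counts << " event(s), " << latencyMS << " ms\n"; }
};

class ScopedLoadEvent           // stand-in for LoadEvent held via shared_ptr
{
public:
    ScopedLoadEvent(Monitor& m, int counts = 1)
        : mMonitor(m), mCounts(counts), mStart(std::chrono::steady_clock::now()) { }

    ~ScopedLoadEvent()
    {   // report the measured latency when the event goes out of scope
        auto elapsed = std::chrono::steady_clock::now() - mStart;
        int ms = static_cast<int>(
            std::chrono::duration_cast<std::chrono::milliseconds>(elapsed).count());
        mMonitor.addCountAndLatency(mCounts, ms);
    }

private:
    Monitor& mMonitor;
    int mCounts;
    std::chrono::steady_clock::time_point mStart;
};

int main()
{
    Monitor diskMonitor;
    {   // mirrors: LoadEvent::pointer event = theApp->getJobQueue().getLoadEvent(jtDISK);
        ScopedLoadEvent event(diskMonitor);
        // ... do the disk-bound work here ...
    }   // leaving the scope reports the elapsed time
    return 0;
}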
@@ -341,6 +341,7 @@ uint256 Ledger::getHash()

void Ledger::saveAcceptedLedger(bool fromConsensus)
{ // can be called in a different thread
    LoadEvent::pointer event = theApp->getJobQueue().getLoadEvent(jtDISK);
    cLog(lsTRACE) << "saveAcceptedLedger " << (fromConsensus ? "fromConsensus " : "fromAcquire ") << getLedgerSeq();
    static boost::format ledgerExists("SELECT LedgerSeq FROM Ledgers where LedgerSeq = %d;");
    static boost::format deleteLedger("DELETE FROM Ledgers WHERE LedgerSeq = %d;");
@@ -1,6 +1,6 @@
#include "LoadMonitor.h"

void LoadMonitor::LoadMonitor::update()
void LoadMonitor::update()
{ // call with the mutex
    time_t now = time(NULL);
@@ -11,7 +11,8 @@ void LoadMonitor::LoadMonitor::update()
    { // way out of date
        mCounts = 0;
        mLatencyEvents = 0;
        mLatencyMS = 0;
        mLatencyMSAvg = 0;
        mLatencyMSPeak = 0;
        mLastUpdate = now;
        return;
    }
@@ -19,9 +20,10 @@ void LoadMonitor::LoadMonitor::update()
    do
    { // do exponential decay
        ++mLastUpdate;
        mCounts -= (mCounts / 4);
        mLatencyEvents -= (mLatencyEvents / 4);
        mLatencyMS -= (mLatencyMS / 4);
        mCounts -= ((mCounts + 3) / 4);
        mLatencyEvents -= ((mLatencyEvents + 3) / 4);
        mLatencyMSAvg -= (mLatencyMSAvg / 4);
        mLatencyMSPeak -= (mLatencyMSPeak / 4);
    } while (mLastUpdate < now);
}
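The decay change above swaps "x -= x / 4" for "x -= (x + 3) / 4" on the integer event counters. A standalone check of the difference, not taken from the commit: with truncating division a residue of 1 to 3 never decays, while rounding the removed quarter up lets an idle counter reach zero.

// Demonstrates why the +3 rounding matters for small integer counters.
#include <cstdint>
#include <iostream>

int main()
{
    std::uint64_t truncating = 3, roundingUp = 3;
    for (int second = 1; second <= 4; ++second)
    {
        truncating -= truncating / 4;          // old behaviour
        roundingUp -= (roundingUp + 3) / 4;    // new behaviour
        std::cout << "after " << second << "s: "
                  << truncating << " vs " << roundingUp << "\n";
    }
    // truncating stays at 3 forever (3 / 4 == 0); roundingUp reaches 0 after three steps.
    return 0;
}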
@@ -35,24 +37,39 @@ void LoadMonitor::addCount(int counts)

void LoadMonitor::addLatency(int latency)
{
    if (latency == 1)
        latency = 0;
    boost::mutex::scoped_lock sl(mLock);

    update();

    ++mLatencyEvents;
    mLatencyMS += latency;
    mLatencyMSAvg += latency;
    mLatencyMSPeak += latency;

    int lp = mLatencyEvents * latency * 4;
    if (mLatencyMSPeak < lp)
        mLatencyMSPeak = lp;
}

void LoadMonitor::addCountAndLatency(int counts, int latency)
{
    if (latency == 1)
        latency = 0;
    boost::mutex::scoped_lock sl(mLock);

    update();
    mCounts += counts;
    ++mLatencyEvents;
    mLatencyMS += latency;
    mLatencyMSAvg += latency;
    mLatencyMSPeak += latency;

    int lp = mLatencyEvents * latency * 4;
    if (mLatencyMSPeak < lp)
        mLatencyMSPeak = lp;
}

void LoadMonitor::getCountAndLatency(uint64& count, uint64& latency)
void LoadMonitor::getCountAndLatency(uint64& count, uint64& latencyAvg, uint64& latencyPeak)
{
    boost::mutex::scoped_lock sl(mLock);
@@ -61,6 +78,13 @@ void LoadMonitor::getCountAndLatency(uint64& count, uint64& latency)
    count = mCounts / 4;

    if (mLatencyEvents == 0)
        latency = 0;
    else latency = mLatencyMS / (mLatencyEvents * 4);
    {
        latencyAvg = 0;
        latencyPeak = 0;
    }
    else
    {
        latencyAvg = mLatencyMSAvg / (mLatencyEvents * 4);
        latencyPeak = mLatencyMSPeak / (mLatencyEvents * 4);
    }
}
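getCountAndLatency() divides the raw accumulators by 4 (and by mLatencyEvents for the latency figures). A plausible reading, sketched numerically below, is that because update() decays roughly a quarter of each accumulator per elapsed second, a steady input rate converges to about four times that rate, so dividing by 4 recovers an approximate per-second figure. Treat this as an interpretation of the arithmetic, not something stated in the commit.

// Numeric check of the steady-state behaviour of a quarter-per-second decay.
#include <iostream>

int main()
{
    const double ratePerSecond = 100.0;
    double accumulator = 0.0;
    for (int second = 0; second < 40; ++second)
    {
        accumulator -= accumulator / 4.0;   // decay step, as in LoadMonitor::update()
        accumulator += ratePerSecond;       // addCount() calls arriving this second
    }
    std::cout << "accumulator ~= " << accumulator                     // ~400
              << ", reported per-second ~= " << accumulator / 4.0     // ~100
              << "\n";
    return 0;
}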
@@ -4,6 +4,7 @@
#include <string>

#include <boost/thread/mutex.hpp>
#include <boost/shared_ptr.hpp>

#include "types.h"
@@ -12,32 +13,31 @@
class LoadMonitor
{
protected:
    std::string mName;
    uint64 mCounts;
    uint64 mLatencyEvents;
    uint64 mLatencyMS;
    uint64 mLatencyMSAvg;
    uint64 mLatencyMSPeak;
    time_t mLastUpdate;
    boost::mutex mLock;

    void update();

public:
    LoadMonitor(const std::string& n) : mName(n), mCounts(0), mLatencyEvents(0), mLatencyMS(0)
    LoadMonitor() : mCounts(0), mLatencyEvents(0), mLatencyMSAvg(0), mLatencyMSPeak(0)
    { mLastUpdate = time(NULL); }

    void setName(const std::string& n) { mName = n; }

    const std::string& getName() const { return mName; }

    void addCount(int counts);
    void addLatency(int latency);
    void addCountAndLatency(int counts, int latency);

    void getCountAndLatency(uint64& count, uint64& latency);
    void getCountAndLatency(uint64& count, uint64& latencyAvg, uint64& latencyPeak);
};

class LoadEvent
{
public:
    typedef boost::shared_ptr<LoadEvent> pointer;

protected:
    LoadMonitor& mMonitor;
    bool mRunning;
@@ -957,22 +957,7 @@ Json::Value NetworkOPs::getServerInfo()
    if (mConsensus)
        info["consensus"] = mConsensus->getJson();

    typedef std::pair<JobType, int> jt_int_pair;
    bool anyJobs = false;
    Json::Value jobs = Json::arrayValue;
    std::vector< std::pair<JobType, int> > jobCounts = theApp->getJobQueue().getJobCounts();
    BOOST_FOREACH(jt_int_pair& it, jobCounts)
    {
        if (it.second != 0)
        {
            Json::Value o = Json::objectValue;
            o[Job::toString(it.first)] = it.second;
            jobs.append(o);
            anyJobs = true;
        }
    }
    if (anyJobs)
        info["jobs"] = jobs;
    info["load"] = theApp->getJobQueue().getJson();

    return info;
}
@@ -4,12 +4,15 @@

#define RIPPLE_PATHS_MAX 3

// TODO: only have the higher fee if the account doesn't in fact exist
// only have the higher fee if the account doesn't in fact exist
void PaymentTransactor::calculateFee()
{
    if (mTxn.getFlags() & tfCreateAccount)
    {
        mFeeDue = theConfig.FEE_ACCOUNT_CREATE;
        const uint160 uDstAccountID = mTxn.getFieldAccount160(sfDestination);
        SLE::pointer sleDst = mEngine->entryCache(ltACCOUNT_ROOT, Ledger::getAccountRootIndex(uDstAccountID));
        if(!sleDst) mFeeDue = theConfig.FEE_ACCOUNT_CREATE;
        else Transactor::calculateFee();
    }else Transactor::calculateFee();
}
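The calculateFee() change above charges FEE_ACCOUNT_CREATE only when the destination's account root is genuinely absent, otherwise falling through to the normal Transactor fee. A minimal sketch of that decision, with a hypothetical helper name, made-up fee values, and plain booleans standing in for the transaction flag and the entryCache lookup:

// Illustrates the fee decision only; not the commit's types or values.
#include <cstdint>
#include <iostream>

std::uint64_t calculatePaymentFee(bool createAccountFlag, bool destinationExists,
                                  std::uint64_t defaultFee, std::uint64_t accountCreateFee)
{
    if (createAccountFlag && !destinationExists)
        return accountCreateFee;   // destination must be funded into existence
    return defaultFee;             // ordinary fee (Transactor::calculateFee path)
}

int main()
{
    std::cout << calculatePaymentFee(true,  false, 10, 1000) << "\n";  // 1000
    std::cout << calculatePaymentFee(true,  true,  10, 1000) << "\n";  // 10
    std::cout << calculatePaymentFee(false, true,  10, 1000) << "\n";  // 10
    return 0;
}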
@@ -146,7 +149,13 @@ TER PaymentTransactor::doApply()
    else
    {
        mTxnAccount->setFieldAmount(sfBalance, saSrcXRPBalance - saDstAmount);
        sleDst->setFieldAmount(sfBalance, sleDst->getFieldAmount(sfBalance) + saDstAmount);
        // re-arm the password change fee if we can and need to
        if ( (sleDst->getFlags() & lsfPasswordSpent) &&
            (saDstAmount > theConfig.FEE_DEFAULT) )
        {
            sleDst->setFieldAmount(sfBalance, sleDst->getFieldAmount(sfBalance) + saDstAmount-theConfig.FEE_DEFAULT);
            sleDst->clearFlag(lsfPasswordSpent);
        }else sleDst->setFieldAmount(sfBalance, sleDst->getFieldAmount(sfBalance) + saDstAmount);

        terResult = tesSUCCESS;
    }
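The doApply() hunk above withholds theConfig.FEE_DEFAULT from a sufficiently large incoming payment when the destination's lsfPasswordSpent flag is set, and then clears the flag, re-arming the account's free password/regular-key change. A simplified, self-contained sketch of that rule; the types and amounts here are made up:

// Illustrates the re-arm rule only; real balances are STAmount fields on an SLE.
#include <cstdint>
#include <iostream>

struct Account
{
    std::uint64_t balance = 0;
    bool passwordSpent = false;
};

void creditPayment(Account& dst, std::uint64_t amount, std::uint64_t feeDefault)
{
    if (dst.passwordSpent && amount > feeDefault)
    {
        dst.balance += amount - feeDefault;   // withhold the default fee
        dst.passwordSpent = false;            // re-arm the free change
    }
    else
        dst.balance += amount;                // plain credit
}

int main()
{
    Account alice;
    alice.passwordSpent = true;
    creditPayment(alice, 500, 10);
    std::cout << alice.balance << " " << alice.passwordSpent << "\n";  // 490 0
    return 0;
}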
@@ -373,8 +373,11 @@ void Peer::processReadBuffer()

    // std::cerr << "Peer::processReadBuffer: " << mIpPort.first << " " << mIpPort.second << std::endl;

    // If connected and get a mtHELLO or if not connected and get a non-mtHELLO, wrong message was sent.
    LoadEvent::pointer event = theApp->getJobQueue().getLoadEvent(jtPEER);

    boost::recursive_mutex::scoped_lock sl(theApp->getMasterLock());

    // If connected and get a mtHELLO or if not connected and get a non-mtHELLO, wrong message was sent.
    if (mHelloed == (type == ripple::mtHELLO))
    {
        cLog(lsWARNING) << "Wrong message type: " << type;
@@ -4,42 +4,29 @@

SETUP_LOG();

// TODO:
TER RegularKeySetTransactor::checkSig()
{
    // Transaction's signing public key must be for the source account.
    // To prove the master private key made this transaction.
    if (mSigningPubKey.getAccountID() != mTxnAccountID)
    {
        // Signing Pub Key must be for Source Account ID.
        cLog(lsWARNING) << "sourceAccountID: " << mSigningPubKey.humanAccountID();
        cLog(lsWARNING) << "txn accountID: " << mTxn.getSourceAccount().humanAccountID();

        return temBAD_SET_ID;
    }
    return tesSUCCESS;
}

// TODO: this should be default fee if flag isn't set
void RegularKeySetTransactor::calculateFee()
{
    mFeeDue = 0;
    Transactor::calculateFee();

    if ( !(mTxnAccount->getFlags() & lsfPasswordSpent) &&
        (mSigningPubKey.getAccountID() == mTxnAccountID))
    { // flag is armed and they signed with the right account

        mSourceBalance = mTxnAccount->getFieldAmount(sfBalance);
        if(mSourceBalance < mFeeDue) mFeeDue = 0;
    }
}


// TODO: change to take a fee if there is one there
TER RegularKeySetTransactor::doApply()
{
    std::cerr << "doRegularKeySet>" << std::endl;

    if (mTxnAccount->getFlags() & lsfPasswordSpent)
    if(mFeeDue.isZero())
    {
        std::cerr << "doRegularKeySet: Delay transaction: Funds already spent." << std::endl;

        return terFUNDS_SPENT;
    }

        mTxnAccount->setFlag(lsfPasswordSpent);
    }

    uint160 uAuthKeyID=mTxn.getFieldAccount160(sfRegularKey);
    mTxnAccount->setFieldAccount(sfRegularKey, uAuthKeyID);
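Read together with the payment hunk earlier, the two RegularKeySet hunks above appear to implement a "one free key change" rule: a zero-fee SetRegularKey is honoured only while lsfPasswordSpent is clear, taking the free change sets the flag, and a later qualifying payment clears it again. The sketch below is a loose reading of that interplay, not the commit's code:

// Illustrates the once-only free change; real code also involves fee calculation and balances.
#include <iostream>

struct Account { bool passwordSpent = false; };

enum Result { tesSUCCESS = 0, terFUNDS_SPENT = 1 };

Result applyFreeRegularKeySet(Account& src)
{
    if (src.passwordSpent)
        return terFUNDS_SPENT;   // the one free change was already used
    src.passwordSpent = true;    // spend it now; a qualifying payment re-arms it
    return tesSUCCESS;
}

int main()
{
    Account root;
    std::cout << applyFreeRegularKeySet(root) << "\n";  // 0 (tesSUCCESS)
    std::cout << applyFreeRegularKeySet(root) << "\n";  // 1 (terFUNDS_SPENT)
    return 0;
}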
@@ -6,6 +6,5 @@ class RegularKeySetTransactor : public Transactor
public:
    RegularKeySetTransactor(const SerializedTransaction& txn,TransactionEngineParams params, TransactionEngine* engine) : Transactor(txn,params,engine) {}
    TER checkFee();
    TER checkSig();
    TER doApply();
};
@@ -289,6 +289,7 @@ void ValidationCollection::condWrite()

void ValidationCollection::doWrite()
{
    LoadEvent::pointer event = theApp->getJobQueue().getLoadEvent(jtDISK);
    static boost::format insVal("INSERT INTO LedgerValidations "
        "(LedgerHash,NodePubKey,Flags,SignTime,Signature) VALUES ('%s','%s','%u','%u',%s);");
@@ -1,6 +1,8 @@
#ifndef __WSHANDLER__
#define __WSHANDLER__

#include "Application.h"

class WSConnection;

// A single instance of this object is made.
@@ -87,6 +89,7 @@ public:

    void on_message(connection_ptr cpClient, message_ptr mpMessage)
    {
        LoadEvent::pointer event = theApp->getJobQueue().getLoadEvent(jtCLIENT);
        Json::Value jvRequest;
        Json::Reader jrReader;
@@ -15,13 +15,14 @@ var serverDelay = 1500;

buster.testRunner.timeout = 5000;


/*
buster.testCase("Simple", {
buster.testCase("Fee Changes", {
  'setUp' : testutils.build_setup({no_server: true}), //
  'tearDown' : testutils.build_teardown(),

  "simple." :
    function (done) { buster.assert(1);
  "varying the fee for Payment" :
    function (done) {

      this.remote.transaction()
        .payment('root', 'alice', "10000")

@@ -36,8 +37,8 @@ buster.testCase("Simple", {
        }).submit();

    }
}); */

});
*/
buster.testCase("Sending", {
  'setUp' : testutils.build_setup(),
  'tearDown' : testutils.build_teardown(),