Mirror of https://github.com/XRPLF/rippled.git (synced 2025-11-27 22:45:52 +00:00)
Merge branch 'extra-cache' into develop
@@ -1,10 +1,8 @@
#include "Application.h"

#ifdef USE_LEVELDB
#include "leveldb/cache.h"
#include "leveldb/filter_policy.h"
#endif

#include "AcceptedLedger.h"
#include "Config.h"

@@ -56,10 +54,7 @@ Application::Application() :
mFeatureTable(2 * 7 * 24 * 60 * 60, 200), // two weeks, 200/256

mRpcDB(NULL), mTxnDB(NULL), mLedgerDB(NULL), mWalletDB(NULL),
mNetNodeDB(NULL), mPathFindDB(NULL), mHashNodeDB(NULL),
#ifdef USE_LEVELDB
mHashNodeLDB(NULL),
#endif
mNetNodeDB(NULL), mPathFindDB(NULL), mHashNodeDB(NULL), mHashNodeLDB(NULL), mEphemeralLDB(NULL),
mConnectionPool(mIOService), mPeerDoor(NULL), mRPCDoor(NULL), mWSPublicDoor(NULL), mWSPrivateDoor(NULL),
mSweepTimer(mAuxService), mShutdown(false)
{

@@ -84,10 +79,11 @@ void Application::stop()
mAuxService.stop();
mJobQueue.shutdown();

#ifdef USE_LEVELDB
delete mHashNodeLDB;
mHashNodeLDB = NULL;
#endif

delete mEphemeralLDB;
mEphemeralLDB = NULL;

WriteLog (lsINFO, Application) << "Stopped: " << mIOService.stopped();
Instance::shutdown();

@@ -164,17 +160,17 @@ void Application::setup()
boost::thread t7(boost::bind(&InitDB, &mPathFindDB, "pathfind.db", PathFindDBInit, PathFindDBCount));
t4.join(); t6.join(); t7.join();

#ifdef USE_LEVELDB
leveldb::Options options;
options.create_if_missing = true;
options.block_cache = leveldb::NewLRUCache(theConfig.getSize(siHashNodeDBCache) * 1024 * 1024);
if (theConfig.NODE_SIZE >= 2)
options.filter_policy = leveldb::NewBloomFilterPolicy(10);
if (theConfig.LDB_IMPORT)
options.write_buffer_size = 32 << 20;

if (mHashedObjectStore.isLevelDB())
{
WriteLog (lsINFO, Application) << "LevelDB used for nodes";
leveldb::Options options;
options.create_if_missing = true;
options.block_cache = leveldb::NewLRUCache(theConfig.getSize(siHashNodeDBCache) * 1024 * 1024);
if (theConfig.NODE_SIZE >= 2)
options.filter_policy = leveldb::NewBloomFilterPolicy(10);
if (theConfig.LDB_IMPORT)
options.write_buffer_size = 32 << 20;
leveldb::Status status = leveldb::DB::Open(options, (theConfig.DATA_DIR / "hashnode").string(), &mHashNodeLDB);
if (!status.ok() || !mHashNodeLDB)
{

@@ -186,13 +182,24 @@ void Application::setup()
}
}
else
#endif
{
WriteLog (lsINFO, Application) << "SQLite used for nodes";
boost::thread t5(boost::bind(&InitDB, &mHashNodeDB, "hashnode.db", HashNodeDBInit, HashNodeDBCount));
t5.join();
}

if (!theConfig.LDB_EPHEMERAL.empty())
{
leveldb::Status status = leveldb::DB::Open(options, theConfig.LDB_EPHEMERAL, &mEphemeralLDB);
if (!status.ok() || !mEphemeralLDB)
{
WriteLog(lsFATAL, Application) << "Unable to open/create epehemeral db: "
<< theConfig.LDB_EPHEMERAL << " " << status.ToString();
StopSustain();
exit(3);
}
}

mTxnDB->getDB()->setupCheckpointing(&mJobQueue);
mLedgerDB->getDB()->setupCheckpointing(&mJobQueue);
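The setup hunk above builds a single leveldb::Options object, opens the persistent "hashnode" store with it, and then reuses the same options to open the optional ephemeral store named by LDB_EPHEMERAL. A minimal, self-contained sketch of that open sequence follows; the function name, paths and size parameters are illustrative assumptions, not rippled's actual values:

    #include <string>
    #include "leveldb/db.h"
    #include "leveldb/cache.h"
    #include "leveldb/filter_policy.h"

    // Open the persistent node store and, if a path is configured, an ephemeral
    // store that shares the same options. Sizes and the bloom-filter flag are
    // placeholders for what rippled reads from theConfig.
    static bool openNodeStores(const std::string& mainPath, const std::string& ephemeralPath,
                               size_t cacheMB, bool useBloomFilter,
                               leveldb::DB** mainDB, leveldb::DB** ephemeralDB)
    {
        leveldb::Options options;
        options.create_if_missing = true;
        options.block_cache = leveldb::NewLRUCache(cacheMB * 1024 * 1024); // shared read cache
        if (useBloomFilter)
            options.filter_policy = leveldb::NewBloomFilterPolicy(10);     // ~10 bits per key

        if (!leveldb::DB::Open(options, mainPath, mainDB).ok())
            return false;                           // the main store is mandatory

        if (!ephemeralPath.empty() &&
            !leveldb::DB::Open(options, ephemeralPath, ephemeralDB).ok())
            return false;                           // the ephemeral store is optional

        return true;
    }

Moving the options out of the isLevelDB() branch is what lets the ephemeral database share the same cache and filter settings even when SQLite remains the main node store.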
@@ -246,9 +253,7 @@ void Application::setup()

mLedgerMaster.setMinValidations(theConfig.VALIDATION_QUORUM);

#ifdef USE_LEVELDB
if (!mHashedObjectStore.isLevelDB())
#endif
theApp->getHashNodeDB()->getDB()->executeSQL(boost::str(boost::format("PRAGMA cache_size=-%d;") %
(theConfig.getSize(siHashNodeDBCache) * 1024)));

@@ -414,9 +419,8 @@ Application::~Application()
delete mHashNodeDB;
delete mNetNodeDB;
delete mPathFindDB;
#ifdef USE_LEVELDB
delete mHashNodeLDB;
#endif
delete mEphemeralLDB;
}

void Application::startNewLedger()

@@ -1,9 +1,7 @@
#ifndef __APPLICATION__
#define __APPLICATION__

#ifdef USE_LEVELDB
#include "leveldb/db.h"
#endif

#include <boost/asio.hpp>

@@ -83,9 +81,8 @@ class Application

DatabaseCon *mRpcDB, *mTxnDB, *mLedgerDB, *mWalletDB, *mNetNodeDB, *mPathFindDB, *mHashNodeDB;

#ifdef USE_LEVELDB
leveldb::DB *mHashNodeLDB;
#endif
leveldb::DB *mEphemeralLDB;

ConnectionPool mConnectionPool;
PeerDoor* mPeerDoor;

@@ -156,9 +153,8 @@ public:
DatabaseCon* getPathFindDB() { return mPathFindDB; }
DatabaseCon* getHashNodeDB() { return mHashNodeDB; }

#ifdef USE_LEVELDB
leveldb::DB* getHashNodeLDB() { return mHashNodeLDB; }
#endif
leveldb::DB* getEphemeralLDB() { return mEphemeralLDB; }

uint256 getNonce256() { return mNonce256; }
std::size_t getNonceST() { return mNonceST; }

@@ -24,6 +24,7 @@
#define SECTION_FEE_ACCOUNT_RESERVE "fee_account_reserve"
#define SECTION_FEE_OWNER_RESERVE "fee_owner_reserve"
#define SECTION_NODE_DB "node_db"
#define SECTION_LDB_EPHEMERAL "ephemeral_db"
#define SECTION_LEDGER_HISTORY "ledger_history"
#define SECTION_IPS "ips"
#define SECTION_NETWORK_QUORUM "network_quorum"

@@ -357,6 +358,7 @@ void Config::load()
(void) sectionSingleB(secConfig, SECTION_RPC_PASSWORD, RPC_PASSWORD);
(void) sectionSingleB(secConfig, SECTION_RPC_USER, RPC_USER);
(void) sectionSingleB(secConfig, SECTION_NODE_DB, NODE_DB);
(void) sectionSingleB(secConfig, SECTION_LDB_EPHEMERAL, LDB_EPHEMERAL);

if (sectionSingleB(secConfig, SECTION_RPC_PORT, strTemp))
RPC_PORT = boost::lexical_cast<int>(strTemp);

@@ -85,6 +85,7 @@ public:
boost::filesystem::path DEBUG_LOGFILE;
boost::filesystem::path VALIDATORS_FILE; // As specifed in rippled.cfg.
std::string NODE_DB; // Database to use for nodes
std::string LDB_EPHEMERAL; // Database for temporary storage
bool LDB_IMPORT; // Import into LevelDB
bool ELB_SUPPORT; // Support Amazon ELB
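The Config.cpp and Config.h hunks above wire the new ephemeral_db section through sectionSingleB into the LDB_EPHEMERAL string. Assuming the usual one-value-per-section rippled.cfg layout, turning the extra cache on would look something like this (the path is purely illustrative):

    [ephemeral_db]
    /ramdisk/rippled/ephemeral

Config::load() copies that single value into LDB_EPHEMERAL; an empty or missing section leaves the feature disabled.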
@@ -1,9 +1,7 @@
#include "HashedObject.h"

#ifdef USE_LEVELDB
#include "leveldb/db.h"
#include "leveldb/write_batch.h"
#endif

#include <boost/lexical_cast.hpp>
#include <boost/foreach.hpp>

@@ -18,7 +16,7 @@ DECLARE_INSTANCE(HashedObject);

HashedObjectStore::HashedObjectStore(int cacheSize, int cacheAge) :
mCache("HashedObjectStore", cacheSize, cacheAge), mNegativeCache("HashedObjectNegativeCache", 0, 120),
mWriteGeneration(0), mWriteLoad(0), mWritePending(false), mLevelDB(false)
mWriteGeneration(0), mWriteLoad(0), mWritePending(false), mLevelDB(false), mEphemeralDB(false)
{
mWriteSet.reserve(128);

@@ -31,14 +29,8 @@ HashedObjectStore::HashedObjectStore(int cacheSize, int cacheAge) :
WriteLog (lsFATAL, HashedObject) << "Incorrect database selection";
assert(false);
}
#ifndef USE_LEVELDB
if (mLevelDB)
{
WriteLog (lsFATAL) << "LevelDB has been selected but not compiled";
assert(false);
}
#endif

if (!theConfig.LDB_EPHEMERAL.empty())
mEphemeralDB = true;
}

void HashedObjectStore::tune(int size, int age)

@@ -61,7 +53,73 @@ int HashedObjectStore::getWriteLoad()
return std::max(mWriteLoad, static_cast<int>(mWriteSet.size()));
}

#ifdef USE_LEVELDB
static HashedObject::pointer LLRetrieve(const uint256& hash, leveldb::DB* db)
{ // low-level retrieve
std::string sData;

leveldb::Status st = db->Get(leveldb::ReadOptions(),
leveldb::Slice(reinterpret_cast<const char *>(hash.begin()), hash.size()), &sData);
if (!st.ok())
{
assert(st.IsNotFound());
return HashedObject::pointer();
}

const unsigned char* bufPtr = reinterpret_cast<const unsigned char*>(&sData[0]);
uint32 index = htonl(*reinterpret_cast<const uint32*>(bufPtr));
int htype = bufPtr[8];

return boost::make_shared<HashedObject>(static_cast<HashedObjectType>(htype), index,
bufPtr + 9, sData.size() - 9, hash);
}

static void LLWrite(boost::shared_ptr<HashedObject> ptr, leveldb::DB* db)
{ // low-level write single
HashedObject& obj = *ptr;
std::vector<unsigned char> rawData(9 + obj.mData.size());
unsigned char* bufPtr = &rawData.front();

*reinterpret_cast<uint32*>(bufPtr + 0) = ntohl(obj.mLedgerIndex);
*reinterpret_cast<uint32*>(bufPtr + 4) = ntohl(obj.mLedgerIndex);
*(bufPtr + 8) = static_cast<unsigned char>(obj.mType);
memcpy(bufPtr + 9, &obj.mData.front(), obj.mData.size());

leveldb::Status st = db->Put(leveldb::WriteOptions(),
leveldb::Slice(reinterpret_cast<const char *>(obj.mHash.begin()), obj.mHash.size()),
leveldb::Slice(reinterpret_cast<const char *>(bufPtr), rawData.size()));
if (!st.ok())
{
WriteLog (lsFATAL, HashedObject) << "Failed to store hash node";
assert(false);
}
}

static void LLWrite(const std::vector< boost::shared_ptr<HashedObject> >& set, leveldb::DB* db)
{ // low-level write set
leveldb::WriteBatch batch;

BOOST_FOREACH(const boost::shared_ptr<HashedObject>& it, set)
{
const HashedObject& obj = *it;
std::vector<unsigned char> rawData(9 + obj.mData.size());
unsigned char* bufPtr = &rawData.front();

*reinterpret_cast<uint32*>(bufPtr + 0) = ntohl(obj.mLedgerIndex);
*reinterpret_cast<uint32*>(bufPtr + 4) = ntohl(obj.mLedgerIndex);
*(bufPtr + 8) = static_cast<unsigned char>(obj.mType);
memcpy(bufPtr + 9, &obj.mData.front(), obj.mData.size());

batch.Put(leveldb::Slice(reinterpret_cast<const char *>(obj.mHash.begin()), obj.mHash.size()),
leveldb::Slice(reinterpret_cast<const char *>(bufPtr), rawData.size()));
}

leveldb::Status st = db->Write(leveldb::WriteOptions(), &batch);
if (!st.ok())
{
WriteLog (lsFATAL, HashedObject) << "Failed to store hash node";
assert(false);
}
}

bool HashedObjectStore::storeLevelDB(HashedObjectType type, uint32 index,
const std::vector<unsigned char>& data, const uint256& hash)
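The LLRetrieve/LLWrite helpers added above also pin down the on-disk record format shared by the hashnode and ephemeral stores: the key is the object hash, and the value is a 9-byte header (the 32-bit ledger index written twice, at offsets 0 and 4, followed by a one-byte object type at offset 8) and then the raw object data. A small self-contained sketch of just that encoding, using hypothetical helper names and standard byte-order calls in place of rippled's own types:

    #include <cstdint>
    #include <cstring>
    #include <vector>
    #include <arpa/inet.h>  // htonl / ntohl

    // Hypothetical helpers mirroring the 9-byte header used by LLWrite/LLRetrieve:
    // [ledger index][ledger index][type byte][object data ...]
    static std::vector<unsigned char> encodeNodeRecord(uint32_t ledgerIndex, unsigned char type,
                                                       const std::vector<unsigned char>& payload)
    {
        std::vector<unsigned char> rec(9 + payload.size());
        uint32_t net = htonl(ledgerIndex);       // byte-swapped, matching the htonl/ntohl pair above
        std::memcpy(&rec[0], &net, 4);           // offset 0: ledger index
        std::memcpy(&rec[4], &net, 4);           // offset 4: ledger index, repeated
        rec[8] = type;                           // offset 8: object type
        if (!payload.empty())
            std::memcpy(&rec[9], &payload[0], payload.size());
        return rec;
    }

    static void decodeNodeHeader(const unsigned char* rec, uint32_t& ledgerIndex, unsigned char& type)
    {
        uint32_t net;
        std::memcpy(&net, rec, 4);
        ledgerIndex = ntohl(net);                // the payload starts at rec + 9
        type = rec[8];
    }

LLRetrieve reverses the same layout, which is why it reads the type from bufPtr[8] and hands bufPtr + 9 with sData.size() - 9 to the HashedObject constructor.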
@@ -118,31 +176,9 @@ void HashedObjectStore::bulkWriteLevelDB(Job &)
setSize = set.size();
}

{
leveldb::WriteBatch batch;

BOOST_FOREACH(const boost::shared_ptr<HashedObject>& it, set)
{
const HashedObject& obj = *it;
std::vector<unsigned char> rawData(9 + obj.mData.size());
unsigned char* bufPtr = &rawData.front();

*reinterpret_cast<uint32*>(bufPtr + 0) = ntohl(obj.mLedgerIndex);
*reinterpret_cast<uint32*>(bufPtr + 4) = ntohl(obj.mLedgerIndex);
*(bufPtr + 8) = static_cast<unsigned char>(obj.mType);
memcpy(bufPtr + 9, &obj.mData.front(), obj.mData.size());

batch.Put(leveldb::Slice(reinterpret_cast<const char *>(obj.mHash.begin()), obj.mHash.size()),
leveldb::Slice(reinterpret_cast<const char *>(bufPtr), rawData.size()));
}

leveldb::Status st = theApp->getHashNodeLDB()->Write(leveldb::WriteOptions(), &batch);
if (!st.ok())
{
WriteLog (lsFATAL, HashedObject) << "Failed to store hash node";
assert(false);
}
}
LLWrite(set, theApp->getHashNodeLDB());
if (mEphemeralDB)
LLWrite(set, theApp->getEphemeralLDB());
}
}
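With the helpers in place, bulkWriteLevelDB shrinks to two calls: one LLWrite against the persistent hashnode store and, when mEphemeralDB is set, an identical LLWrite against the ephemeral store. The underlying WriteBatch pattern, reduced to a self-contained sketch where plain strings stand in for the hash keys and encoded records:

    #include <string>
    #include <utility>
    #include <vector>
    #include "leveldb/db.h"
    #include "leveldb/write_batch.h"

    // Write a set of key/value records atomically into one LevelDB instance.
    static bool writeNodeBatch(leveldb::DB* db,
                               const std::vector< std::pair<std::string, std::string> >& records)
    {
        leveldb::WriteBatch batch;
        for (size_t i = 0; i < records.size(); ++i)
            batch.Put(records[i].first, records[i].second);  // leveldb::Slice converts from std::string
        return db->Write(leveldb::WriteOptions(), &batch).ok();
    }

    // Mirroring a flush to the ephemeral store is then just a second call:
    //     writeNodeBatch(hashNodeDB, records);
    //     if (ephemeralDB)
    //         writeNodeBatch(ephemeralDB, records);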
@@ -152,33 +188,35 @@ HashedObject::pointer HashedObjectStore::retrieveLevelDB(const uint256& hash)
if (obj || mNegativeCache.isPresent(hash) || !theApp || !theApp->getHashNodeLDB())
return obj;

std::string sData;

if (mEphemeralDB)
{
LoadEvent::autoptr event(theApp->getJobQueue().getLoadEventAP(jtHO_READ, "HOS::retrieve"));
leveldb::Status st = theApp->getHashNodeLDB()->Get(leveldb::ReadOptions(),
leveldb::Slice(reinterpret_cast<const char *>(hash.begin()), hash.size()), &sData);
if (!st.ok())
obj = LLRetrieve(hash, theApp->getEphemeralLDB());
if (obj)
{
assert(st.IsNotFound());
mCache.canonicalize(hash, obj);
return obj;
}
}

const unsigned char* bufPtr = reinterpret_cast<const unsigned char*>(&sData[0]);
uint32 index = htonl(*reinterpret_cast<const uint32*>(bufPtr));
int htype = bufPtr[8];
{
LoadEvent::autoptr event(theApp->getJobQueue().getLoadEventAP(jtHO_READ, "HOS::retrieve"));
obj = LLRetrieve(hash, theApp->getHashNodeLDB());
if (!obj)
{
mNegativeCache.add(hash);
return obj;
}
}

obj = boost::make_shared<HashedObject>(static_cast<HashedObjectType>(htype), index,
bufPtr + 9, sData.size() - 9, hash);
mCache.canonicalize(hash, obj);

if (mEphemeralDB)
LLWrite(obj, theApp->getEphemeralLDB());

WriteLog (lsTRACE, HashedObject) << "HOS: " << hash << " fetch: in db";
return obj;
}

#endif

bool HashedObjectStore::storeSQLite(HashedObjectType type, uint32 index,
const std::vector<unsigned char>& data, const uint256& hash)
{ // return: false = already in cache, true = added to cache
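Taken together, retrieveLevelDB (and the matching retrieveSQLite changes further down) now resolves a hash through four layers: the in-memory cache, the negative cache, the ephemeral store if one is configured, and finally the persistent node store, back-filling the ephemeral store after a persistent hit. A condensed, self-contained model of that lookup order, with std::map/std::set standing in for the real cache and database classes:

    #include <map>
    #include <memory>
    #include <set>
    #include <string>

    // Simplified stand-ins for the store's caches and database handles.
    struct Node { std::string data; };
    typedef std::shared_ptr<Node> NodePtr;
    typedef std::map<std::string, std::string> Store;   // hash -> encoded record

    static NodePtr fetchNode(const std::string& hash,
                             std::map<std::string, NodePtr>& memCache,
                             std::set<std::string>& negativeCache,
                             Store* ephemeral, Store& persistent)
    {
        std::map<std::string, NodePtr>::iterator hit = memCache.find(hash);
        if (hit != memCache.end())                       // 1. in-memory object cache
            return hit->second;
        if (negativeCache.count(hash))                   // 2. known to be absent
            return NodePtr();

        if (ephemeral)                                   // 3. ephemeral store, if configured
        {
            Store::iterator e = ephemeral->find(hash);
            if (e != ephemeral->end())
            {
                NodePtr obj(new Node);
                obj->data = e->second;
                memCache[hash] = obj;
                return obj;
            }
        }

        Store::iterator p = persistent.find(hash);       // 4. persistent node store
        if (p == persistent.end())
        {
            negativeCache.insert(hash);
            return NodePtr();
        }
        NodePtr obj(new Node);
        obj->data = p->second;
        memCache[hash] = obj;
        if (ephemeral)
            (*ephemeral)[hash] = p->second;              // back-fill the ephemeral cache
        return obj;
    }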
@@ -210,6 +248,7 @@ bool HashedObjectStore::storeSQLite(HashedObjectType type, uint32 index,
// else
// WriteLog (lsTRACE, HashedObject) << "HOS: already had " << hash;
mNegativeCache.del(hash);

return true;
}

@@ -237,6 +276,9 @@ void HashedObjectStore::bulkWriteSQLite(Job&)

#ifndef NO_SQLITE3_PREPARE

if (mEphemeralDB)
LLWrite(set, theApp->getEphemeralLDB());

{
Database* db = theApp->getHashNodeDB()->getDB();
static SqliteStatement pStB(db->getSqliteDB(), "BEGIN TRANSACTION;", !theConfig.RUN_STANDALONE);

@@ -322,6 +364,16 @@ HashedObject::pointer HashedObjectStore::retrieveSQLite(const uint256& hash)
if (mNegativeCache.isPresent(hash))
return obj;

if (mEphemeralDB)
{
obj = LLRetrieve(hash, theApp->getEphemeralLDB());
if (obj)
{
mCache.canonicalize(hash, obj);
return obj;
}
}

if (!theApp || !theApp->getHashNodeDB())
return obj;

@@ -401,12 +453,13 @@ HashedObject::pointer HashedObjectStore::retrieveSQLite(const uint256& hash)
obj = boost::make_shared<HashedObject>(htype, index, data, hash);
mCache.canonicalize(hash, obj);

if (mEphemeralDB)
LLWrite(obj, theApp->getEphemeralLDB());

WriteLog (lsTRACE, HashedObject) << "HOS: " << hash << " fetch: in db";
return obj;
}

#ifdef USE_LEVELDB

int HashedObjectStore::import(const std::string& file)
{
WriteLog (lsWARNING, HashedObject) << "Hashed object import from \"" << file << "\".";

@@ -476,6 +529,4 @@ int HashedObjectStore::import(const std::string& file)
return count;
}

#endif

// vim:ts=4

@@ -69,7 +69,7 @@ protected:

std::vector< boost::shared_ptr<HashedObject> > mWriteSet;
bool mWritePending;
bool mLevelDB;
bool mLevelDB, mEphemeralDB;

public:

@@ -82,19 +82,15 @@ public:
bool store(HashedObjectType type, uint32 index, const std::vector<unsigned char>& data,
const uint256& hash)
{
#ifdef USE_LEVELDB
if (mLevelDB)
return storeLevelDB(type, index, data, hash);
#endif
return storeSQLite(type, index, data, hash);
}

HashedObject::pointer retrieve(const uint256& hash)
{
#ifdef USE_LEVELDB
if (mLevelDB)
return retrieveLevelDB(hash);
#endif
return retrieveSQLite(hash);
}
@@ -103,12 +99,10 @@ public:
HashedObject::pointer retrieveSQLite(const uint256& hash);
void bulkWriteSQLite(Job&);

#ifdef USE_LEVELDB
bool storeLevelDB(HashedObjectType type, uint32 index, const std::vector<unsigned char>& data,
const uint256& hash);
HashedObject::pointer retrieveLevelDB(const uint256& hash);
void bulkWriteLevelDB(Job&);
#endif

void waitWrite();

@@ -72,8 +72,10 @@ RPCHandler::RPCHandler(NetworkOPs* netOps) : mNetOps(netOps), mRole(FORBID)
RPCHandler::RPCHandler(NetworkOPs* netOps, InfoSub::pointer infoSub) : mNetOps(netOps), mInfoSub(infoSub), mRole(FORBID)
{ ; }

Json::Value RPCHandler::transactionSign(Json::Value jvRequest, bool bSubmit)
Json::Value RPCHandler::transactionSign(Json::Value jvRequest, bool bSubmit, ScopedLock& mlh)
{
mlh.unlock();

Json::Value jvResult;
RippleAddress naSeed;
RippleAddress raSrcAddressID;

@@ -108,7 +110,7 @@ Json::Value RPCHandler::transactionSign(Json::Value jvRequest, bool bSubmit)
return rpcError(rpcINVALID_PARAMS);
}

AccountState::pointer asSrc = mNetOps->getAccountState(mNetOps->getCurrentLedger(), raSrcAddressID);
AccountState::pointer asSrc = mNetOps->getAccountState(mNetOps->getCurrentSnapshot(), raSrcAddressID);
if (!asSrc)
{
WriteLog (lsDEBUG, RPCHandler) << boost::str(boost::format("transactionSign: Failed to find source account in current ledger: %s")

@@ -176,7 +178,6 @@ Json::Value RPCHandler::transactionSign(Json::Value jvRequest, bool bSubmit)

Ledger::pointer lSnapshot = mNetOps->getCurrentSnapshot();
{
ScopedUnlock su(theApp->getMasterLock());
bool bValid;
RLCache::pointer cache = boost::make_shared<RLCache>(lSnapshot);
Pathfinder pf(cache, raSrcAddressID, dstAccountID,

@@ -214,7 +215,7 @@ Json::Value RPCHandler::transactionSign(Json::Value jvRequest, bool bSubmit)
if (!txJSON.isMember("Sequence")) txJSON["Sequence"] = asSrc->getSeq();
if (!txJSON.isMember("Flags")) txJSON["Flags"] = 0;

Ledger::pointer lpCurrent = mNetOps->getCurrentLedger();
Ledger::pointer lpCurrent = mNetOps->getCurrentSnapshot();
SLE::pointer sleAccountRoot = mNetOps->getSLEi(lpCurrent, Ledger::getAccountRootIndex(raSrcAddressID.getAccountID()));

if (!sleAccountRoot)

@@ -1570,7 +1571,7 @@ Json::Value RPCHandler::doRipplePathFind(Json::Value jvRequest, int& cost, Scope
Json::Value RPCHandler::doSign(Json::Value jvRequest, int& cost, ScopedLock& MasterLockHolder)
{
cost = rpcCOST_EXPENSIVE;
return transactionSign(jvRequest, false);
return transactionSign(jvRequest, false, MasterLockHolder);
}

// {

@@ -1581,7 +1582,7 @@ Json::Value RPCHandler::doSubmit(Json::Value jvRequest, int& cost, ScopedLock& M
{
if (!jvRequest.isMember("tx_blob"))
{
return transactionSign(jvRequest, true);
return transactionSign(jvRequest, true, MasterLockHolder);
}

Json::Value jvResult;
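The transactionSign refactor above changes the locking contract: doSign and doSubmit still hold the master lock when they call in, but they now pass the holder down so transactionSign can release it immediately and work against getCurrentSnapshot() instead of the live ledger. The apparent intent is that expensive signing and pathfinding no longer serialize the rest of the server behind the master lock. With standard library types as stand-ins for rippled's ScopedLock and master lock, the hand-off looks roughly like this:

    #include <mutex>
    #include <string>

    static std::mutex masterLock;  // stand-in for theApp->getMasterLock()

    // The callee receives the lock holder, releases it before the slow work,
    // and operates only on data captured while the lock was still held.
    static std::string signAgainstSnapshot(std::unique_lock<std::mutex>& masterLockHolder)
    {
        // ... capture a snapshot of shared state here, while still locked ...
        masterLockHolder.unlock();            // equivalent of mlh.unlock() above

        // ... do the expensive signing / pathfinding without blocking others ...
        return "signed";
    }

    static std::string doSign()
    {
        std::unique_lock<std::mutex> holder(masterLock);
        return signAgainstSnapshot(holder);   // holder comes back unlocked
    }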
@@ -32,7 +32,7 @@ class RPCHandler
// Utilities
void addSubmitPath(Json::Value& txJSON);
boost::unordered_set<RippleAddress> parseAccountIds(const Json::Value& jvArray);
Json::Value transactionSign(Json::Value jvRequest, bool bSubmit);
Json::Value transactionSign(Json::Value jvRequest, bool bSubmit, ScopedLock& mlh);

Json::Value lookupLedger(Json::Value jvRequest, Ledger::pointer& lpLedger);

@@ -113,7 +113,6 @@ void Application::updateTables(bool ldbImport)
exit(1);
}

#ifdef USE_LEVELDB
if (theApp->getHashedObjectStore().isLevelDB())
{
boost::filesystem::path hashPath = theConfig.DATA_DIR / "hashnode.db";

@@ -134,5 +133,4 @@ void Application::updateTables(bool ldbImport)
}
}
}
#endif
}

@@ -150,9 +150,7 @@ int main(int argc, char* argv[])
("start", "Start from a fresh Ledger.")
("net", "Get the initial ledger from the network.")
("fg", "Run in the foreground.")
#ifdef USE_LEVELDB
("import", "Import SQLite node DB into LevelDB.")
#endif
;

// Interpret positional arguments as --parameters.
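Finally, the command-line hunk registers the import switch only when LevelDB support is compiled in. A self-contained boost::program_options fragment showing the same conditional registration (the option strings are taken from the hunk above; the surrounding boilerplate is generic):

    #include <boost/program_options.hpp>
    #include <iostream>

    namespace po = boost::program_options;

    int main(int argc, char* argv[])
    {
        po::options_description desc("General Options");
        desc.add_options()
            ("help,h", "Display this message.")
            ("fg", "Run in the foreground.")
    #ifdef USE_LEVELDB
            ("import", "Import SQLite node DB into LevelDB.")  // offered only when compiled in
    #endif
            ;

        po::variables_map vm;
        po::store(po::parse_command_line(argc, argv, desc), vm);
        po::notify(vm);

        if (vm.count("help"))
            std::cout << desc << std::endl;
        return 0;
    }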