diff --git a/.gitignore b/.gitignore index 6157c1787e..37b1e0a575 100644 --- a/.gitignore +++ b/.gitignore @@ -63,3 +63,7 @@ DerivedData # Intel Parallel Studio 2013 XE My Amplifier XE Results - RippleD + +# KeyvaDB files +*.key +*.val diff --git a/BeastConfig.h b/BeastConfig.h index 47adbde46b..3b82014da9 100644 --- a/BeastConfig.h +++ b/BeastConfig.h @@ -120,6 +120,19 @@ //#define BEAST_BIND_USES_TR1 1 //#define BEAST_BIND_USES_BOOST 1 -//#define BEAST_UNIT_TESTS 1 +//------------------------------------------------------------------------------ +// +// Ripple compilation settings +// +//------------------------------------------------------------------------------ + +/** Config: RIPPLE_VERIFY_NODEOBJECT_KEYS + + This verifies that the hash of node objects matches the payload. + It is quite expensive so normally this is turned off! +*/ +#ifndef RIPPLE_VERIFY_NODEOBJECT_KEYS +//#define RIPPLE_VERIFY_NODEOBJECT_KEYS 1 +#endif #endif diff --git a/Builds/QtCreator/rippled.pro b/Builds/QtCreator/rippled.pro index 89ebd28df4..9858ac395a 100644 --- a/Builds/QtCreator/rippled.pro +++ b/Builds/QtCreator/rippled.pro @@ -63,6 +63,7 @@ SOURCES += \ ../../Subtrees/beast/modules/beast_basics/beast_basics.cpp \ ../../Subtrees/beast/modules/beast_core/beast_core.cpp \ ../../Subtrees/beast/modules/beast_crypto/beast_crypto.cpp \ + ../../Subtrees/beast/modules/beast_db/beast_db.cpp \ ../../modules/ripple_app/ripple_app_pt1.cpp \ ../../modules/ripple_app/ripple_app_pt2.cpp \ ../../modules/ripple_app/ripple_app_pt3.cpp \ diff --git a/Builds/VisualStudio2012/RippleD.vcxproj b/Builds/VisualStudio2012/RippleD.vcxproj index e7e4b5088f..bdf45f6cc1 100644 --- a/Builds/VisualStudio2012/RippleD.vcxproj +++ b/Builds/VisualStudio2012/RippleD.vcxproj @@ -157,6 +157,24 @@ true true + + true + true + true + true + + + true + true + true + true + + + true + true + true + true + true true @@ -802,12 +820,6 @@ true true - - true - true - true - true - true true @@ -1027,6 +1039,7 @@ + true true @@ 
-1402,6 +1415,9 @@ + + + @@ -1441,6 +1457,7 @@ + @@ -1732,7 +1749,7 @@ Disabled - _CRTDBG_MAP_ALLOC;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) ProgramDatabase false MultiThreadedDebug diff --git a/Builds/VisualStudio2012/RippleD.vcxproj.filters b/Builds/VisualStudio2012/RippleD.vcxproj.filters index 7d7ff164cf..beaa45d3af 100644 --- a/Builds/VisualStudio2012/RippleD.vcxproj.filters +++ b/Builds/VisualStudio2012/RippleD.vcxproj.filters @@ -594,9 +594,6 @@ [1] Ripple\ripple_app\_misc - - [1] Ripple\ripple_app\_misc - [1] Ripple\ripple_app\_misc @@ -807,9 +804,6 @@ [1] Ripple\ripple_app\node - - [1] Ripple\ripple_app\node - [1] Ripple\ripple_mdb @@ -897,6 +891,21 @@ [1] Ripple\ripple_app\node + + [1] Ripple\ripple_app\node + + + [1] Ripple\ripple_app\node + + + [1] Ripple\ripple_app\node + + + [1] Ripple\ripple_app\node + + + [0] Subtrees\beast + @@ -963,9 +972,6 @@ [1] Ripple\ripple_basics\utility - - [1] Ripple\ripple_basics\utility - [1] Ripple\ripple_basics\utility @@ -1581,9 +1587,6 @@ [1] Ripple\ripple_app\node - - [1] Ripple\ripple_app\node - [1] Ripple\ripple_mdb @@ -1674,6 +1677,24 @@ [1] Ripple\ripple_app\node + + [1] Ripple\ripple_app\node + + + [1] Ripple\ripple_app\node + + + [1] Ripple\ripple_app\node + + + [1] Ripple\ripple_app\node + + + [1] Ripple\ripple_basics\utility + + + [1] Ripple\ripple_core\functional + diff --git a/SConstruct b/SConstruct index 903d5bf877..69461f7c6f 100644 --- a/SConstruct +++ b/SConstruct @@ -122,6 +122,7 @@ COMPILED_FILES = [ 'Subtrees/beast/modules/beast_basics/beast_basics.cpp', 'Subtrees/beast/modules/beast_core/beast_core.cpp', 'Subtrees/beast/modules/beast_crypto/beast_crypto.cpp', + 'Subtrees/beast/modules/beast_db/beast_db.cpp', 'modules/ripple_app/ripple_app_pt1.cpp', 'modules/ripple_app/ripple_app_pt2.cpp', 'modules/ripple_app/ripple_app_pt3.cpp', diff --git a/Subtrees/README.md b/Subtrees/README.md index 51435b4def..457688b264 100644 --- a/Subtrees/README.md 
+++ b/Subtrees/README.md @@ -21,6 +21,19 @@ Branch ripple-fork ``` +## LightningDB (a.k.a. MDB) + +A supposedly fast memory-mapped key value database system + +Repository
+``` +git://gitorious.org/mdb/mdb.git +``` +Branch +``` +mdb.master +``` + ## websocket Ripple's fork of websocketpp has some incompatible changes and Ripple specific includes. diff --git a/Subtrees/beast/Builds/VisualStudio2012/BeastConfig.h b/Subtrees/beast/Builds/VisualStudio2012/BeastConfig.h index ebf0d9a7bd..0a19d0402d 100644 --- a/Subtrees/beast/Builds/VisualStudio2012/BeastConfig.h +++ b/Subtrees/beast/Builds/VisualStudio2012/BeastConfig.h @@ -120,6 +120,4 @@ //#define BEAST_BIND_USES_TR1 1 //#define BEAST_BIND_USES_BOOST 1 -#define BEAST_UNIT_TESTS 1 - #endif diff --git a/Subtrees/beast/Builds/VisualStudio2012/beast.vcxproj b/Subtrees/beast/Builds/VisualStudio2012/beast.vcxproj index 8309bb27d2..99c88c4500 100644 --- a/Subtrees/beast/Builds/VisualStudio2012/beast.vcxproj +++ b/Subtrees/beast/Builds/VisualStudio2012/beast.vcxproj @@ -78,6 +78,12 @@ true true + + true + true + true + true +
@@ -134,12 +140,14 @@ + + @@ -162,6 +170,7 @@ + @@ -246,6 +255,8 @@ + + @@ -407,6 +418,12 @@ true true
+ + true + true + true + true + true true @@ -437,6 +454,12 @@ true true + + true + true + true + true + true true @@ -918,6 +941,13 @@ true true + + + true + true + true + true + diff --git a/Subtrees/beast/Builds/VisualStudio2012/beast.vcxproj.filters b/Subtrees/beast/Builds/VisualStudio2012/beast.vcxproj.filters index 7b777f5085..7f02acb998 100644 --- a/Subtrees/beast/Builds/VisualStudio2012/beast.vcxproj.filters +++ b/Subtrees/beast/Builds/VisualStudio2012/beast.vcxproj.filters @@ -36,6 +36,9 @@ beast_basics + + beast_db + @@ -125,6 +128,12 @@ {1170f2bc-2456-410a-ab2b-c45f6ed37b9e} + + {4834218f-f13f-41bc-a8a0-50314a3a99a3} + + + {15a98fee-1b52-45eb-9480-514b8750d755} + @@ -623,6 +632,21 @@ beast_core\containers + + beast_core\files + + + beast_core\diagnostic + + + beast_core\memory + + + beast_db + + + beast_db\keyvalue + @@ -967,6 +991,18 @@ beast_crypto\math + + beast_core\files + + + beast_core\diagnostic + + + beast_db + + + beast_db\keyvalue + diff --git a/Subtrees/beast/TODO.txt b/Subtrees/beast/TODO.txt index a5faea1456..e4a9e8f073 100644 --- a/Subtrees/beast/TODO.txt +++ b/Subtrees/beast/TODO.txt @@ -2,6 +2,12 @@ BEAST TODO -------------------------------------------------------------------------------- +- Specialize UnsignedInteger<> for peformance in the storage format + +- Macro for acquiring a ScopedLock that records file and line. + +- Rename HeapBlock routines to not conflict with _CRTDBG_MAP_ALLOC macros + - Design a WeakPtr / SharedPtr / SharedObject intrusive system - Implement beast::Bimap? 
diff --git a/Subtrees/beast/modules/beast_core/beast_core.cpp b/Subtrees/beast/modules/beast_core/beast_core.cpp index 82966a182e..f2387873fe 100644 --- a/Subtrees/beast/modules/beast_core/beast_core.cpp +++ b/Subtrees/beast/modules/beast_core/beast_core.cpp @@ -149,12 +149,14 @@ namespace beast #include "diagnostic/beast_FPUFlags.cpp" #include "diagnostic/beast_LeakChecked.cpp" #include "diagnostic/beast_UnitTest.cpp" +#include "diagnostic/beast_UnitTestUtilities.cpp" #include "files/beast_DirectoryIterator.cpp" #include "files/beast_File.cpp" #include "files/beast_FileInputStream.cpp" #include "files/beast_FileOutputStream.cpp" #include "files/beast_FileSearchPath.cpp" +#include "files/beast_RandomAccessFile.cpp" #include "files/beast_TemporaryFile.cpp" #include "json/beast_JSON.cpp" diff --git a/Subtrees/beast/modules/beast_core/beast_core.h b/Subtrees/beast/modules/beast_core/beast_core.h index a0d9a3042f..c19c149f27 100644 --- a/Subtrees/beast/modules/beast_core/beast_core.h +++ b/Subtrees/beast/modules/beast_core/beast_core.h @@ -226,6 +226,7 @@ namespace beast #include "diagnostic/beast_Error.h" #include "diagnostic/beast_FPUFlags.h" #include "diagnostic/beast_UnitTest.h" +#include "diagnostic/beast_UnitTestUtilities.h" #include "diagnostic/beast_Throw.h" #include "containers/beast_AbstractFifo.h" #include "containers/beast_Array.h" @@ -252,6 +253,7 @@ namespace beast #include "files/beast_FileOutputStream.h" #include "files/beast_FileSearchPath.h" #include "files/beast_MemoryMappedFile.h" +#include "files/beast_RandomAccessFile.h" #include "files/beast_TemporaryFile.h" #include "json/beast_JSON.h" #include "logging/beast_FileLogger.h" @@ -274,6 +276,7 @@ namespace beast #include "memory/beast_WeakReference.h" #include "memory/beast_MemoryAlignment.h" #include "memory/beast_CacheLine.h" +#include "memory/beast_RecycledObjectPool.h" #include "misc/beast_Result.h" #include "misc/beast_Uuid.h" #include "misc/beast_WindowsRegistry.h" diff --git 
a/Subtrees/beast/modules/beast_core/containers/beast_AbstractFifo.cpp b/Subtrees/beast/modules/beast_core/containers/beast_AbstractFifo.cpp index 0e3dd1236d..32f3ff5f70 100644 --- a/Subtrees/beast/modules/beast_core/containers/beast_AbstractFifo.cpp +++ b/Subtrees/beast/modules/beast_core/containers/beast_AbstractFifo.cpp @@ -129,7 +129,7 @@ void AbstractFifo::finishedRead (int numRead) noexcept class AbstractFifoTests : public UnitTest { public: - AbstractFifoTests() : UnitTest ("Abstract Fifo") + AbstractFifoTests() : UnitTest ("Abstract Fifo", "beast") { } @@ -224,6 +224,4 @@ public: } }; -#if BEAST_UNIT_TESTS static AbstractFifoTests abstractFifoTests; -#endif diff --git a/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTest.cpp b/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTest.cpp index 41903dc311..8905ed2a12 100644 --- a/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTest.cpp +++ b/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTest.cpp @@ -21,8 +21,13 @@ */ //============================================================================== -UnitTest::UnitTest (const String& name_) - : name (name_), runner (nullptr) +UnitTest::UnitTest (String const& name, + String const& group, + When when) + : m_name (name) + , m_group (group) + , m_when (when) + , m_runner (nullptr) { getAllTests().add (this); } @@ -32,19 +37,25 @@ UnitTest::~UnitTest() getAllTests().removeFirstMatchingValue (this); } -Array& UnitTest::getAllTests() +UnitTest::TestList& UnitTest::getAllTests() { - static Array tests; - return tests; + static TestList s_tests; + + return s_tests; } -void UnitTest::initialise() {} -void UnitTest::shutdown() {} - -void UnitTest::performTest (UnitTests* const runner_) +void UnitTest::initialise() { - bassert (runner_ != nullptr); - runner = runner_; +} + +void UnitTest::shutdown() +{ +} + +void UnitTest::performTest (UnitTests* const runner) +{ + bassert (runner != nullptr); + m_runner = runner; initialise(); runTest(); @@ 
-53,23 +64,24 @@ void UnitTest::performTest (UnitTests* const runner_) void UnitTest::logMessage (const String& message) { - runner->logMessage (message); + m_runner->logMessage (message); } void UnitTest::beginTest (const String& testName) { - runner->beginNewTest (this, testName); + m_runner->beginNewTest (this, testName); } void UnitTest::expect (const bool result, const String& failureMessage) { if (result) - runner->addPass(); + m_runner->addPass(); else - runner->addFail (failureMessage); + m_runner->addFail (failureMessage); } //============================================================================== + UnitTests::UnitTests() : currentTest (nullptr), assertOnFailure (true), @@ -105,8 +117,52 @@ void UnitTests::resultsUpdated() { } -void UnitTests::runTests (const Array& tests) +void UnitTests::runTest (UnitTest& test) { + try + { + test.performTest (this); + } + catch (std::exception& e) + { + String s; + s << "Got an exception: " << e.what (); + addFail (s); + } + catch (...) + { + addFail ("Got an unhandled exception"); + } +} + +void UnitTests::runTest (String const& name) +{ + results.clear(); + resultsUpdated(); + + UnitTest::TestList& tests (UnitTest::getAllTests ()); + + for (int i = 0; i < tests.size(); ++i) + { + UnitTest& test = *tests [i]; + + if (test.getGroup () == name && test.getWhen () == UnitTest::runAlways) + { + runTest (test); + } + else if (test.getName () == name) + { + runTest (test); + break; + } + + } +} + +void UnitTests::runAllTests () +{ + UnitTest::TestList& tests (UnitTest::getAllTests ()); + results.clear(); resultsUpdated(); @@ -115,22 +171,14 @@ void UnitTests::runTests (const Array& tests) if (shouldAbortTests()) break; - try - { - tests.getUnchecked(i)->performTest (this); - } - catch (...) 
- { - addFail ("An unhandled exception was thrown!"); - } + UnitTest& test = *tests [i]; + + if (test.getWhen () == UnitTest::runAlways) + runTest (test); } endTest(); -} -void UnitTests::runAllTests() -{ - runTests (UnitTest::getAllTests()); } void UnitTests::logMessage (const String& message) @@ -150,14 +198,14 @@ void UnitTests::beginNewTest (UnitTest* const test, const String& subCategory) TestResult* const r = new TestResult(); results.add (r); - r->unitTestName = test->getName(); + r->unitTestName = test->getGroup() + "::" + test->getName(); r->subcategoryName = subCategory; r->passes = 0; r->failures = 0; - logMessage ("Test: " + r->unitTestName + "/" + subCategory + "..."); + logMessage ("Test '" + r->unitTestName + "': " + subCategory); - resultsUpdated(); + resultsUpdated (); } void UnitTests::endTest() @@ -214,8 +262,8 @@ void UnitTests::addFail (const String& failureMessage) r->failures++; - String message ("!!! Test "); - message << (r->failures + r->passes) << " failed"; + String message ("Failure, #"); + message << (r->failures + r->passes); if (failureMessage.isNotEmpty()) message << ": " << failureMessage; diff --git a/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTest.h b/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTest.h index 237f1ba89e..f7e8466c18 100644 --- a/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTest.h +++ b/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTest.h @@ -28,7 +28,6 @@ #include "../containers/beast_OwnedArray.h" class UnitTests; - /** This is a base class for classes that perform a unit test. To write a test using this class, your code should look something like this: @@ -56,9 +55,10 @@ class UnitTests; } }; - // Explicit template instantiation is required to make the unit - // test get automatically added to the set of unit tests. - template class UnitTestType ; + // This makes the unit test available in the global list + // It doesn't have to be static. 
+ // + static MyTest myTest; @endcode @@ -69,15 +69,38 @@ class UnitTests; class BEAST_API UnitTest : Uncopyable { public: + enum When + { + runAlways, + runManual + }; + + /** The type of a list of tests. + */ + typedef Array TestList; + //============================================================================== - /** Creates a test with the given name. */ - explicit UnitTest (String const& name); + /** Creates a test with the given name, group, and run option. + + The group is used when you want to run all tests in a particular group + instead of all tests in general. The run option allows you to write some + tests that are only available manually. For examplem, a performance unit + test that takes a long time which you might not want to run every time + you run all tests. + */ + explicit UnitTest (String const& name, String const& group = "", When when = runAlways); /** Destructor. */ virtual ~UnitTest(); /** Returns the name of the test. */ - const String& getName() const noexcept { return name; } + const String& getName() const noexcept { return m_name; } + + /** Returns the group of the test. */ + String const& getGroup () const noexcept { return m_group; } + + /** Returns the run option of the test. */ + When getWhen () const noexcept { return m_when; } /** Runs the test, using the specified UnitTests. You shouldn't need to call this method directly - use @@ -86,7 +109,7 @@ public: void performTest (UnitTests* runner); /** Returns the set of all UnitTest objects that currently exist. */ - static Array& getAllTests(); + static TestList& getAllTests(); //============================================================================== /** You can optionally implement this method to set up your test. @@ -155,14 +178,16 @@ public: //============================================================================== /** Writes a message to the test log. - This can only be called from within your runTest() method. 
+ This can only be called during your runTest() method. */ void logMessage (const String& message); private: //============================================================================== - const String name; - UnitTests* runner; + String const m_name; + String const m_group; + When const m_when; + UnitTests* m_runner; }; //============================================================================== @@ -187,12 +212,14 @@ public: /** Destructor. */ virtual ~UnitTests(); - /** Runs a set of tests. - - The tests are performed in order, and the results are logged. To run all the - registered UnitTest objects that exist, use runAllTests(). + /** Run the specified unit test. + + Subclasses can override this to do extra stuff. */ - void runTests (const Array& tests); + virtual void runTest (UnitTest& test); + + /** Run a particular test or group. */ + void runTest (String const& name); /** Runs all the UnitTest objects that currently exist. This calls runTests() for all the objects listed in UnitTest::getAllTests(). diff --git a/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTestUtilities.cpp b/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTestUtilities.cpp new file mode 100644 index 0000000000..0a553cffa4 --- /dev/null +++ b/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTestUtilities.cpp @@ -0,0 +1,56 @@ +//------------------------------------------------------------------------------ +/* + This file is part of Beast: https://github.com/vinniefalco/Beast + Copyright 2013, Vinnie Falco + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +class UnitTestUtilitiesTests : public UnitTest +{ +public: + UnitTestUtilitiesTests () : UnitTest ("UnitTestUtilities", "beast") + { + } + + void testPayload () + { + using namespace UnitTestUtilities; + + int const maxBufferSize = 4000; + int const minimumBytes = 1; + int const numberOfItems = 100; + int64 const seedValue = 50; + + beginTest ("Payload"); + + Payload p1 (maxBufferSize); + Payload p2 (maxBufferSize); + + for (int i = 0; i < numberOfItems; ++i) + { + p1.repeatableRandomFill (minimumBytes, maxBufferSize, seedValue); + p2.repeatableRandomFill (minimumBytes, maxBufferSize, seedValue); + + expect (p1 == p2, "Should be equal"); + } + } + + void runTest () + { + testPayload (); + } +}; + +static UnitTestUtilitiesTests unitTestUtilitiesTests; diff --git a/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTestUtilities.h b/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTestUtilities.h new file mode 100644 index 0000000000..b2fa7792c0 --- /dev/null +++ b/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTestUtilities.h @@ -0,0 +1,100 @@ +//------------------------------------------------------------------------------ +/* + This file is part of Beast: https://github.com/vinniefalco/Beast + Copyright 2013, Vinnie Falco + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef BEAST_UNITTESTUTILITIES_H_INCLUDED +#define BEAST_UNITTESTUTILITIES_H_INCLUDED + +#include "../maths/beast_Random.h" + +namespace UnitTestUtilities +{ + +/** Fairly shuffle an array pseudo-randomly. +*/ +template +void repeatableShuffle (int const numberOfItems, T& arrayOfItems, int64 seedValue) +{ + Random r (seedValue); + + for (int i = numberOfItems - 1; i > 0; --i) + { + int const choice = r.nextInt (i + 1); + + std::swap (arrayOfItems [i], arrayOfItems [choice]); + } +} + +/** A block of memory used for test data. +*/ +struct Payload +{ + /** Construct a payload with a buffer of the specified maximum size. + + @param maximumBytes The size of the buffer, in bytes. + */ + explicit Payload (int maxBufferSize) + : bufferSize (maxBufferSize) + , data (maxBufferSize) + { + } + + /** Generate a random block of data within a certain size range. + + @param minimumBytes The smallest number of bytes in the resulting payload. + @param maximumBytes The largest number of bytes in the resulting payload. + @param seedValue The value to seed the random number generator with. 
+ */ + void repeatableRandomFill (int minimumBytes, int maximumBytes, int64 seedValue) noexcept + { + bassert (minimumBytes >=0 && maximumBytes <= bufferSize); + + Random r (seedValue); + + bytes = minimumBytes + r.nextInt (1 + maximumBytes - minimumBytes); + + bassert (bytes >= minimumBytes && bytes <= bufferSize); + + for (int i = 0; i < bytes; ++i) + data [i] = static_cast (r.nextInt ()); + } + + /** Compare two payloads for equality. + */ + bool operator== (Payload const& other) const noexcept + { + if (bytes == other.bytes) + { + return memcmp (data.getData (), other.data.getData (), bytes) == 0; + } + else + { + return false; + } + } + +public: + int const bufferSize; + + int bytes; + HeapBlock data; +}; + +} + +#endif diff --git a/Subtrees/beast/modules/beast_core/files/beast_File.cpp b/Subtrees/beast/modules/beast_core/files/beast_File.cpp index 7ef709e324..55bc1cafbd 100644 --- a/Subtrees/beast/modules/beast_core/files/beast_File.cpp +++ b/Subtrees/beast/modules/beast_core/files/beast_File.cpp @@ -926,7 +926,7 @@ MemoryMappedFile::MemoryMappedFile (const File& file, const Range& fileRa class FileTests : public UnitTest { public: - FileTests() : UnitTest ("File") {} + FileTests() : UnitTest ("File", "beast") {} void runTest() { @@ -1106,7 +1106,5 @@ public: } }; -#if BEAST_UNIT_TESTS static FileTests fileTests; -#endif diff --git a/Subtrees/beast/modules/beast_core/files/beast_FileOutputStream.cpp b/Subtrees/beast/modules/beast_core/files/beast_FileOutputStream.cpp index 368eb9c438..a3a0e5e3c6 100644 --- a/Subtrees/beast/modules/beast_core/files/beast_FileOutputStream.cpp +++ b/Subtrees/beast/modules/beast_core/files/beast_FileOutputStream.cpp @@ -114,7 +114,7 @@ bool FileOutputStream::write (const void* const src, const size_t numBytes) return true; } -void FileOutputStream::writeRepeatedByte (uint8 byte, size_t numBytes) +bool FileOutputStream::writeRepeatedByte (uint8 byte, size_t numBytes) { bassert (((ssize_t) numBytes) >= 0); @@ -123,9 +123,8 @@ void 
FileOutputStream::writeRepeatedByte (uint8 byte, size_t numBytes) memset (buffer + bytesInBuffer, byte, numBytes); bytesInBuffer += numBytes; currentPosition += numBytes; + return true; } - else - { - OutputStream::writeRepeatedByte (byte, numBytes); - } -} + + return OutputStream::writeRepeatedByte (byte, numBytes); +} \ No newline at end of file diff --git a/Subtrees/beast/modules/beast_core/files/beast_FileOutputStream.h b/Subtrees/beast/modules/beast_core/files/beast_FileOutputStream.h index 5f358ecd63..e4110492c9 100644 --- a/Subtrees/beast/modules/beast_core/files/beast_FileOutputStream.h +++ b/Subtrees/beast/modules/beast_core/files/beast_FileOutputStream.h @@ -27,7 +27,6 @@ #include "beast_File.h" #include "../streams/beast_OutputStream.h" - //============================================================================== /** An output stream that writes into a local file. @@ -87,11 +86,11 @@ public: Result truncate(); //============================================================================== - void flush(); - int64 getPosition(); - bool setPosition (int64 pos); - bool write (const void* data, size_t numBytes); - void writeRepeatedByte (uint8 byte, size_t numTimesToRepeat); + void flush() override; + int64 getPosition() override; + bool setPosition (int64) override; + bool write (const void*, size_t) override; + bool writeRepeatedByte (uint8 byte, size_t numTimesToRepeat) override; private: @@ -111,4 +110,4 @@ private: ssize_t writeInternal (const void*, size_t); }; -#endif // BEAST_FILEOUTPUTSTREAM_BEASTHEADER +#endif \ No newline at end of file diff --git a/Subtrees/beast/modules/beast_core/files/beast_RandomAccessFile.cpp b/Subtrees/beast/modules/beast_core/files/beast_RandomAccessFile.cpp new file mode 100644 index 0000000000..28c029cd8b --- /dev/null +++ b/Subtrees/beast/modules/beast_core/files/beast_RandomAccessFile.cpp @@ -0,0 +1,272 @@ +//------------------------------------------------------------------------------ +/* + This file is part of 
Beast: https://github.com/vinniefalco/Beast + Copyright 2013, Vinnie Falco + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +RandomAccessFile::RandomAccessFile () noexcept + : fileHandle (nullptr) + , currentPosition (0) +{ +} + +RandomAccessFile::~RandomAccessFile () +{ + close (); +} + +Result RandomAccessFile::open (File const& path, Mode mode) +{ + close (); + + return nativeOpen (path, mode); +} + +void RandomAccessFile::close () +{ + if (isOpen ()) + { + nativeFlush (); + nativeClose (); + } +} + +Result RandomAccessFile::setPosition (FileOffset newPosition) +{ + if (newPosition != currentPosition) + { + // VFALCO NOTE I dislike return from the middle but + // Result::ok() is showing up in the profile + // + return nativeSetPosition (newPosition); + } + + return Result::ok (); +} + +Result RandomAccessFile::read (void* buffer, ByteCount numBytes, ByteCount* pActualAmount) +{ + return nativeRead (buffer, numBytes, pActualAmount); +} + +Result RandomAccessFile::write (const void* data, ByteCount numBytes, ByteCount* pActualAmount) +{ + bassert (data != nullptr && ((ssize_t) numBytes) >= 0); + + Result result (Result::ok ()); + + ByteCount amountWritten = 0; + + result = nativeWrite (data, numBytes, &amountWritten); 
+ + if (result.wasOk ()) + currentPosition += amountWritten; + + if (pActualAmount != nullptr) + *pActualAmount = amountWritten; + + return result; +} + +Result RandomAccessFile::truncate () +{ + Result result = flush (); + + if (result.wasOk ()) + result = nativeTruncate (); + + return result; +} + +Result RandomAccessFile::flush () +{ + return nativeFlush (); +} + +//------------------------------------------------------------------------------ + +class RandomAccessFileTests : public UnitTest +{ +public: + RandomAccessFileTests () : UnitTest ("RandomAccessFile", "beast") + { + } + + enum + { + maxPayload = 8192 + }; + + /* For this test we will create a file which consists of a fixed + number of variable length records. Each record is numbered sequentially + starting at 0. To calculate the position of each record we first build + a table of size/offset pairs using a pseudorandom number generator. + */ + struct Record + { + int index; + int bytes; + int offset; + }; + + typedef HeapBlock Records; + + // Produce the pseudo-random set of records. + static void createRecords (HeapBlock & records, + int numRecords, + int maxBytes, + int64 seedValue) + { + using namespace UnitTestUtilities; + + Random r (seedValue); + + records.malloc (numRecords); + + int offset = 0; + + for (int i = 0; i < numRecords; ++i) + { + int const bytes = r.nextInt (maxBytes) + 1; + + records [i].index = i; + records [i].bytes = bytes; + records [i].offset = offset; + + offset += bytes; + } + + repeatableShuffle (numRecords, records, seedValue); + } + + // Write all the records to the file. + // The payload is pseudo-randomly generated. 
+ void writeRecords (RandomAccessFile& file, + int numRecords, + HeapBlock const& records, + int64 seedValue) + { + using namespace UnitTestUtilities; + + for (int i = 0; i < numRecords; ++i) + { + Payload p (records [i].bytes); + + p.repeatableRandomFill (records [i].bytes, + records [i].bytes, + records [i].index + seedValue); + + file.setPosition (records [i].offset); + + Result result = file.write (p.data.getData (), p.bytes); + + expect (result.wasOk (), "Should be ok"); + } + } + + // Read the records and verify the consistency. + void readRecords (RandomAccessFile& file, + int numRecords, + HeapBlock const& records, + int64 seedValue) + { + using namespace UnitTestUtilities; + + for (int i = 0; i < numRecords; ++i) + { + Record const& record (records [i]); + + int const bytes = record.bytes; + + Payload p1 (bytes); + Payload p2 (bytes); + + p1.repeatableRandomFill (bytes, bytes, record.index + seedValue); + + file.setPosition (record.offset); + + Result result = file.read (p2.data.getData (), bytes); + + expect (result.wasOk (), "Should be ok"); + + if (result.wasOk ()) + { + p2.bytes = bytes; + + expect (p1 == p2, "Should be equal"); + } + } + } + + // Perform the test at the given buffer size. 
+ void testFile (int const numRecords) + { + using namespace UnitTestUtilities; + + int const seedValue = 50; + + beginTest (String ("numRecords=") + String (numRecords)); + + // Calculate the path + File const path (File::createTempFile ("RandomAccessFile")); + + // Create a predictable set of records + HeapBlock records (numRecords); + createRecords (records, numRecords, maxPayload, seedValue); + + Result result (Result::ok ()); + + { + // Create the file + RandomAccessFile file; + result = file.open (path, RandomAccessFile::readWrite); + expect (result.wasOk (), "Should be ok"); + + if (result.wasOk ()) + { + writeRecords (file, numRecords, records, seedValue); + + readRecords (file, numRecords, records, seedValue); + + repeatableShuffle (numRecords, records, seedValue); + + readRecords (file, numRecords, records, seedValue); + } + } + + if (result.wasOk ()) + { + // Re-open the file in read only mode + RandomAccessFile file; + result = file.open (path, RandomAccessFile::readOnly); + expect (result.wasOk (), "Should be ok"); + + if (result.wasOk ()) + { + readRecords (file, numRecords, records, seedValue); + } + } + } + + void runTest () + { + testFile (10000); + } + +private: +}; + +static RandomAccessFileTests randomAccessFileTests; diff --git a/Subtrees/beast/modules/beast_core/files/beast_RandomAccessFile.h b/Subtrees/beast/modules/beast_core/files/beast_RandomAccessFile.h new file mode 100644 index 0000000000..2b7c9505c6 --- /dev/null +++ b/Subtrees/beast/modules/beast_core/files/beast_RandomAccessFile.h @@ -0,0 +1,197 @@ +//------------------------------------------------------------------------------ +/* + This file is part of Beast: https://github.com/vinniefalco/Beast + Copyright 2013, Vinnie Falco + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef BEAST_RANDOMACCESSFILE_H_INCLUDED +#define BEAST_RANDOMACCESSFILE_H_INCLUDED + +#include "../misc/beast_Result.h" + +/** Provides random access reading and writing to an operating system file. + + This class wraps the underlying native operating system routines for + opening and closing a file for reading and/or writing, seeking within + the file, and performing read and write operations. There are also methods + provided for obtaining an input or output stream which will work with + the file. + + @note All files are opened in binary mode. No text newline conversions + are performed. + + @note None of these members are thread safe. The caller is responsible + for synchronization. + + @see FileInputStream, FileOutputStream +*/ +class BEAST_API RandomAccessFile : Uncopyable, LeakChecked +{ +public: + /** The type of an FileOffset. + + This can be useful when writing templates. + */ + typedef int64 FileOffset; + + /** The type of a byte count. + + This can be useful when writing templates. + */ + typedef size_t ByteCount; + + /** The access mode. + + @see open + */ + enum Mode + { + readOnly, + readWrite + }; + + //============================================================================== + /** Creates an unopened file object. + + @see open, isOpen + */ + RandomAccessFile () noexcept; + + /** Destroy the file object. + + If the operating system file is open it will be closed. 
+ */ + ~RandomAccessFile (); + + /** Determine if a file is open. + + @return `true` if the operating system file is open. + */ + bool isOpen () const noexcept { return fileHandle != nullptr; } + + /** Opens a file object. + + The file is opened with the specified permissions. The initial + position is set to the beginning of the file. + + @note If a file is already open, it will be closed first. + + @param path The path to the file + @param mode The access permissions + @return An indication of the success of the operation. + + @see Mode + */ + Result open (File const& path, Mode mode); + + /** Closes the file object. + + Any data that needs to be flushed will be written before the file is closed. + + @note If no file is opened, this call does nothing. + */ + void close (); + + /** Retrieve the @ref File associated with this object. + + @return The associated @ref File. + */ + File const& getFile () const noexcept { return file; } + + /** Get the current position. + + The next read or write will take place from here. + + @return The current position, as an absolute byte FileOffset from the begining. + */ + FileOffset getPosition () const noexcept { return currentPosition; } + + /** Set the current position. + + The next read or write will take place at this location. + + @param newPosition The byte FileOffset from the beginning of the file to move to. + + @return `true` if the operation was successful. + */ + Result setPosition (FileOffset newPosition); + + /** Read data at the current position. + + The caller is responsible for making sure that the memory pointed to + by `buffer` is at least as large as `bytesToRead`. + + @note The file must have been opened with read permission. + + @param buffer The memory to store the incoming data + @param numBytes The number of bytes to read. + @param pActualAmount Pointer to store the actual amount read, or `nullptr`. + + @return `true` if all the bytes were read. 
+ */ + Result read (void* buffer, ByteCount numBytes, ByteCount* pActualAmount = 0); + + /** Write data at the current position. + + The current position is advanced past the data written. If data is + written past the end of the file, the file size is increased on disk. + + The caller is responsible for making sure that the memory pointed to + by `buffer` is at least as large as `bytesToWrite`. + + @note The file must have been opened with write permission. + + @param data A pointer to the data buffer to write to the file. + @param numBytes The number of bytes to write. + @param pActualAmount Pointer to store the actual amount written, or `nullptr`. + + @return `true` if all the data was written. + */ + Result write (const void* data, ByteCount numBytes, ByteCount* pActualAmount = 0); + + /** Truncate the file at the current position. + */ + Result truncate (); + + /** Flush the output buffers. + + This calls the operating system to make sure all data has been written. + */ + Result flush(); + + //============================================================================== +private: + // Some of these these methods are implemented natively on + // the corresponding platform. 
+ // + // See beast_posix_SharedCode.h and beast_win32_Files.cpp + // + Result nativeOpen (File const& path, Mode mode); + void nativeClose (); + Result nativeSetPosition (FileOffset newPosition); + Result nativeRead (void* buffer, ByteCount numBytes, ByteCount* pActualAmount = 0); + Result nativeWrite (const void* data, ByteCount numBytes, ByteCount* pActualAmount = 0); + Result nativeTruncate (); + Result nativeFlush (); + +private: + File file; + void* fileHandle; + FileOffset currentPosition; +}; + +#endif + diff --git a/Subtrees/beast/modules/beast_core/json/beast_JSON.cpp b/Subtrees/beast/modules/beast_core/json/beast_JSON.cpp index d712896a1f..216bdf7741 100644 --- a/Subtrees/beast/modules/beast_core/json/beast_JSON.cpp +++ b/Subtrees/beast/modules/beast_core/json/beast_JSON.cpp @@ -535,7 +535,7 @@ void JSON::writeToStream (OutputStream& output, const var& data, const bool allO class JSONTests : public UnitTest { public: - JSONTests() : UnitTest ("JSON") { } + JSONTests() : UnitTest ("JSON", "beast") { } static String createRandomWideCharString (Random& r) { @@ -639,6 +639,4 @@ public: } }; -#if BEAST_UNIT_TESTS static JSONTests jsonTests; -#endif diff --git a/Subtrees/beast/modules/beast_core/maths/beast_Random.cpp b/Subtrees/beast/modules/beast_core/maths/beast_Random.cpp index ce3199699c..9f381e8479 100644 --- a/Subtrees/beast/modules/beast_core/maths/beast_Random.cpp +++ b/Subtrees/beast/modules/beast_core/maths/beast_Random.cpp @@ -24,6 +24,7 @@ Random::Random (const int64 seedValue) noexcept : seed (seedValue) { + nextInt (); // fixes a bug where the first int is always 0 } Random::Random() @@ -39,6 +40,8 @@ Random::~Random() noexcept void Random::setSeed (const int64 newSeed) noexcept { seed = newSeed; + + nextInt (); // fixes a bug where the first int is always 0 } void Random::combineSeed (const int64 seedValue) noexcept @@ -56,6 +59,8 @@ void Random::setSeedRandomly() combineSeed (Time::getHighResolutionTicksPerSecond()); combineSeed 
(Time::currentTimeMillis()); globalSeed ^= seed; + + nextInt (); // fixes a bug where the first int is always 0 } Random& Random::getSystemRandom() noexcept @@ -98,6 +103,23 @@ double Random::nextDouble() noexcept return static_cast (nextInt()) / (double) 0xffffffff; } +void Random::nextBlob (void* buffer, size_t bytes) +{ + int const remainder = bytes % sizeof (int64); + + { + int64* dest = static_cast (buffer); + for (int i = bytes / sizeof (int64); i > 0; --i) + *dest++ = nextInt64 (); + buffer = dest; + } + + { + int64 const val = nextInt64 (); + memcpy (buffer, &val, remainder); + } +} + BigInteger Random::nextLargeNumber (const BigInteger& maximumValue) { BigInteger n; @@ -137,7 +159,7 @@ void Random::fillBitsRandomly (BigInteger& arrayToChange, int startBit, int numB class RandomTests : public UnitTest { public: - RandomTests() : UnitTest ("Random") {} + RandomTests() : UnitTest ("Random", "beast") {} void runTest() { @@ -165,6 +187,4 @@ public: } }; -#if BEAST_UNIT_TESTS static RandomTests randomTests; -#endif diff --git a/Subtrees/beast/modules/beast_core/maths/beast_Random.h b/Subtrees/beast/modules/beast_core/maths/beast_Random.h index f35c0eed92..1e68b1959c 100644 --- a/Subtrees/beast/modules/beast_core/maths/beast_Random.h +++ b/Subtrees/beast/modules/beast_core/maths/beast_Random.h @@ -89,6 +89,10 @@ public: */ bool nextBool() noexcept; + /** Fills a piece of memory with random data. + */ + void nextBlob (void* buffer, size_t bytes); + /** Returns a BigInteger containing a random number. @returns a random value in the range 0 to (maximumValue - 1). 
diff --git a/Subtrees/beast/modules/beast_core/memory/beast_RecycledObjectPool.h b/Subtrees/beast/modules/beast_core/memory/beast_RecycledObjectPool.h new file mode 100644 index 0000000000..6981427bf5 --- /dev/null +++ b/Subtrees/beast/modules/beast_core/memory/beast_RecycledObjectPool.h @@ -0,0 +1,126 @@ +//------------------------------------------------------------------------------ +/* + This file is part of Beast: https://github.com/vinniefalco/Beast + Copyright 2013, Vinnie Falco + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef BEAST_RECYCLEDOBJECTPOOL_H_INCLUDED +#define BEAST_RECYCLEDOBJECTPOOL_H_INCLUDED + +/** A pool of objects which may be recycled. + + This is a thread safe pool of objects that get re-used. It is + primarily designed to eliminate the need for many memory allocations + and frees when temporary buffers are needed for operations. + + To use it, first declare a structure containing the information + that you want to recycle. 
Then when you want to use a recycled object + put a ScopedItem on your stack: + + @code + + struct StdString + { + std::string data; + }; + + RecycledObjectPool pool; + + void foo () + { + RecycledObjectPool ::ScopedItem item; + + item.getObject ().data = "text"; + } + + @endcode +*/ +template +class RecycledObjectPool +{ +public: + struct Item : Object, LockFreeStack ::Node, LeakChecked + { + }; + + class ScopedItem + { + public: + explicit ScopedItem (RecycledObjectPool & pool) + : m_pool (pool) + , m_item (pool.get ()) + { + } + + ~ScopedItem () + { + m_pool.release (m_item); + } + + Object& getObject () noexcept + { + return *m_item; + } + + private: + RecycledObjectPool & m_pool; + Item* const m_item; + }; + +public: + RecycledObjectPool () noexcept + { + } + + ~RecycledObjectPool () + { + for (;;) + { + Item* const item = m_stack.pop_front (); + + if (item != nullptr) + delete item; + else + break; + } + } + +private: + Item* get () + { + Item* item = m_stack.pop_front (); + + if (item == nullptr) + { + item = new Item; + + if (item == nullptr) + Throw (std::bad_alloc ()); + } + + return item; + } + + void release (Item* item) noexcept + { + m_stack.push_front (item); + } + +private: + LockFreeStack m_stack; +}; + +#endif diff --git a/Subtrees/beast/modules/beast_core/memory/beast_Uncopyable.h b/Subtrees/beast/modules/beast_core/memory/beast_Uncopyable.h index e1f1a614b1..349dde0a10 100644 --- a/Subtrees/beast/modules/beast_core/memory/beast_Uncopyable.h +++ b/Subtrees/beast/modules/beast_core/memory/beast_Uncopyable.h @@ -45,13 +45,16 @@ @code - class MyClass : Uncopyable + class MyClass : public Uncopyable { public: //... }; @endcode + + @note The derivation should be public or else child classes which + also derive from Uncopyable may not compile. 
*/ class Uncopyable { diff --git a/Subtrees/beast/modules/beast_core/misc/beast_Result.cpp b/Subtrees/beast/modules/beast_core/misc/beast_Result.cpp index a61394f08f..a3ad744474 100644 --- a/Subtrees/beast/modules/beast_core/misc/beast_Result.cpp +++ b/Subtrees/beast/modules/beast_core/misc/beast_Result.cpp @@ -21,6 +21,8 @@ */ //============================================================================== +Result::Result() noexcept {} + Result::Result (const String& message) noexcept : errorMessage (message) { @@ -60,11 +62,6 @@ bool Result::operator!= (const Result& other) const noexcept return errorMessage != other.errorMessage; } -Result Result::ok() noexcept -{ - return Result (String::empty); -} - Result Result::fail (const String& errorMessage) noexcept { return Result (errorMessage.isEmpty() ? "Unknown Error" : errorMessage); diff --git a/Subtrees/beast/modules/beast_core/misc/beast_Result.h b/Subtrees/beast/modules/beast_core/misc/beast_Result.h index 137daa830c..91bd1e1aee 100644 --- a/Subtrees/beast/modules/beast_core/misc/beast_Result.h +++ b/Subtrees/beast/modules/beast_core/misc/beast_Result.h @@ -26,10 +26,7 @@ #include "../text/beast_String.h" - -//============================================================================== -/** - Represents the 'success' or 'failure' of an operation, and holds an associated +/** Represents the 'success' or 'failure' of an operation, and holds an associated error message to describe the error when there's a failure. E.g. @@ -55,12 +52,12 @@ } @endcode */ -class BEAST_API Result +class BEAST_API Result { public: //============================================================================== /** Creates and returns a 'successful' result. */ - static Result ok() noexcept; + static Result ok() noexcept { return Result(); } /** Creates a 'failure' result. 
If you pass a blank error message in here, a default "Unknown Error" message @@ -94,12 +91,12 @@ public: const String& getErrorMessage() const noexcept; //============================================================================== - Result (const Result& other); - Result& operator= (const Result& other); + Result (const Result&); + Result& operator= (const Result&); #if BEAST_COMPILER_SUPPORTS_MOVE_SEMANTICS - Result (Result&& other) noexcept; - Result& operator= (Result&& other) noexcept; + Result (Result&&) noexcept; + Result& operator= (Result&&) noexcept; #endif bool operator== (const Result& other) const noexcept; @@ -108,6 +105,9 @@ public: private: String errorMessage; + // The default constructor is not for public use! + // Instead, use Result::ok() or Result::fail() + Result() noexcept; explicit Result (const String&) noexcept; // These casts are private to prevent people trying to use the Result object in numeric contexts @@ -115,5 +115,5 @@ private: operator void*() const; }; +#endif -#endif // BEAST_RESULT_BEASTHEADER diff --git a/Subtrees/beast/modules/beast_core/native/beast_posix_SharedCode.h b/Subtrees/beast/modules/beast_core/native/beast_posix_SharedCode.h index 936be35f40..222490176e 100644 --- a/Subtrees/beast/modules/beast_core/native/beast_posix_SharedCode.h +++ b/Subtrees/beast/modules/beast_core/native/beast_posix_SharedCode.h @@ -504,6 +504,184 @@ Result FileOutputStream::truncate() return getResultForReturnValue (ftruncate (getFD (fileHandle), (off_t) currentPosition)); } +//============================================================================== + +Result RandomAccessFile::nativeOpen (File const& path, Mode mode) +{ + bassert (! 
isOpen ()); + + Result result (Result::ok ()); + + if (path.exists()) + { + int oflag; + switch (mode) + { + case readOnly: + oflag = O_RDONLY; + break; + + default: + case readWrite: + oflag = O_RDWR; + break; + }; + + const int f = ::open (path.getFullPathName().toUTF8(), oflag, 00644); + + if (f != -1) + { + currentPosition = lseek (f, 0, SEEK_SET); + + if (currentPosition >= 0) + { + file = path; + fileHandle = fdToVoidPointer (f); + } + else + { + result = getResultForErrno(); + ::close (f); + } + } + else + { + result = getResultForErrno(); + } + } + else if (mode == readWrite) + { + const int f = ::open (path.getFullPathName().toUTF8(), O_RDWR + O_CREAT, 00644); + + if (f != -1) + { + file = path; + fileHandle = fdToVoidPointer (f); + } + else + { + result = getResultForErrno(); + } + } + else + { + // file doesn't exist and we're opening read-only + Result::fail (String (strerror (ENOENT))); + } + + return result; +} + +void RandomAccessFile::nativeClose () +{ + bassert (isOpen ()); + + file = File::nonexistent (); + ::close (getFD (fileHandle)); + fileHandle = nullptr; + currentPosition = 0; +} + +Result RandomAccessFile::nativeSetPosition (FileOffset newPosition) +{ + bassert (isOpen ()); + + off_t const actualPosition = lseek (getFD (fileHandle), newPosition, SEEK_SET); + + currentPosition = actualPosition; + + if (actualPosition != newPosition) + { + // VFALCO NOTE I dislike return from the middle but + // Result::ok() is showing up in the profile + // + return getResultForErrno(); + } + + return Result::ok(); +} + +Result RandomAccessFile::nativeRead (void* buffer, ByteCount numBytes, ByteCount* pActualAmount) +{ + bassert (isOpen ()); + + ssize_t bytesRead = ::read (getFD (fileHandle), buffer, numBytes); + + if (bytesRead < 0) + { + if (pActualAmount != nullptr) + *pActualAmount = 0; + + // VFALCO NOTE I dislike return from the middle but + // Result::ok() is showing up in the profile + // + return getResultForErrno(); + } + + currentPosition += 
bytesRead; + + if (pActualAmount != nullptr) + *pActualAmount = bytesRead; + + return Result::ok(); +} + +Result RandomAccessFile::nativeWrite (void const* data, ByteCount numBytes, size_t* pActualAmount) +{ + bassert (isOpen ()); + + ssize_t bytesWritten = ::write (getFD (fileHandle), data, numBytes); + + // write(3) says that the actual return will be exactly -1 on + // error, but we will assume anything negative indicates failure. + // + if (bytesWritten < 0) + { + if (pActualAmount != nullptr) + *pActualAmount = 0; + + // VFALCO NOTE I dislike return from the middle but + // Result::ok() is showing up in the profile + // + return getResultForErrno(); + } + + if (pActualAmount != nullptr) + *pActualAmount = bytesWritten; + + return Result::ok(); +} + +Result RandomAccessFile::nativeTruncate () +{ + bassert (isOpen ()); + + flush(); + + return getResultForReturnValue (ftruncate (getFD (fileHandle), (off_t) currentPosition)); +} + +Result RandomAccessFile::nativeFlush () +{ + bassert (isOpen ()); + + Result result (Result::ok ()); + + if (fsync (getFD (fileHandle)) == -1) + result = getResultForErrno(); + +#if BEAST_ANDROID + // This stuff tells the OS to asynchronously update the metadata + // that the OS has cached aboud the file - this metadata is used + // when the device is acting as a USB drive, and unless it's explicitly + // refreshed, it'll get out of step with the real file. 
+ const LocalRef t (javaString (file.getFullPathName())); + android.activity.callVoidMethod (BeastAppActivity.scanFile, t.get()); +#endif + + return result; +} + //============================================================================== String SystemStats::getEnvironmentVariable (const String& name, const String& defaultValue) { diff --git a/Subtrees/beast/modules/beast_core/native/beast_win32_Files.cpp b/Subtrees/beast/modules/beast_core/native/beast_win32_Files.cpp index 444bc51c3e..302ba9a960 100644 --- a/Subtrees/beast/modules/beast_core/native/beast_win32_Files.cpp +++ b/Subtrees/beast/modules/beast_core/native/beast_win32_Files.cpp @@ -307,6 +307,163 @@ Result FileOutputStream::truncate() : WindowsFileHelpers::getResultForLastError(); } +//============================================================================== + +Result RandomAccessFile::nativeOpen (File const& path, Mode mode) +{ + bassert (! isOpen ()); + + Result result (Result::ok ()); + + DWORD dwDesiredAccess; + switch (mode) + { + case readOnly: + dwDesiredAccess = GENERIC_READ; + break; + + default: + case readWrite: + dwDesiredAccess = GENERIC_READ | GENERIC_WRITE; + break; + }; + + DWORD dwCreationDisposition; + switch (mode) + { + case readOnly: + dwCreationDisposition = OPEN_EXISTING; + break; + + default: + case readWrite: + dwCreationDisposition = OPEN_ALWAYS; + break; + }; + + HANDLE h = CreateFile (path.getFullPathName().toWideCharPointer(), + dwDesiredAccess, + FILE_SHARE_READ, + 0, + dwCreationDisposition, + FILE_ATTRIBUTE_NORMAL, + 0); + + if (h != INVALID_HANDLE_VALUE) + { + file = path; + fileHandle = h; + + result = setPosition (0); + + if (result.failed ()) + nativeClose (); + } + else + { + result = WindowsFileHelpers::getResultForLastError(); + } + + return result; +} + +void RandomAccessFile::nativeClose () +{ + bassert (isOpen ()); + + CloseHandle ((HANDLE) fileHandle); + + file = File::nonexistent (); + fileHandle = nullptr; + currentPosition = 0; +} + +Result 
RandomAccessFile::nativeSetPosition (FileOffset newPosition) +{ + bassert (isOpen ()); + + Result result (Result::ok ()); + + LARGE_INTEGER li; + li.QuadPart = newPosition; + li.LowPart = SetFilePointer ((HANDLE) fileHandle, + (LONG) li.LowPart, + &li.HighPart, + FILE_BEGIN); + + if (li.LowPart != INVALID_SET_FILE_POINTER) + { + currentPosition = li.QuadPart; + } + else + { + result = WindowsFileHelpers::getResultForLastError(); + } + + return result; +} + +Result RandomAccessFile::nativeRead (void* buffer, ByteCount numBytes, ByteCount* pActualAmount) +{ + bassert (isOpen ()); + + Result result (Result::ok ()); + + DWORD actualNum = 0; + + if (! ReadFile ((HANDLE) fileHandle, buffer, (DWORD) numBytes, &actualNum, 0)) + result = WindowsFileHelpers::getResultForLastError(); + + currentPosition += actualNum; + + if (pActualAmount != nullptr) + *pActualAmount = actualNum; + + return result; +} + +Result RandomAccessFile::nativeWrite (void const* data, ByteCount numBytes, size_t* pActualAmount) +{ + bassert (isOpen ()); + + Result result (Result::ok ()); + + DWORD actualNum = 0; + + if (! WriteFile ((HANDLE) fileHandle, data, (DWORD) numBytes, &actualNum, 0)) + result = WindowsFileHelpers::getResultForLastError(); + + if (pActualAmount != nullptr) + *pActualAmount = actualNum; + + return result; +} + +Result RandomAccessFile::nativeTruncate () +{ + bassert (isOpen ()); + + Result result (Result::ok ()); + + if (! SetEndOfFile ((HANDLE) fileHandle)) + result = WindowsFileHelpers::getResultForLastError(); + + return result; +} + +Result RandomAccessFile::nativeFlush () +{ + bassert (isOpen ()); + + Result result (Result::ok ()); + + if (! 
FlushFileBuffers ((HANDLE) fileHandle)) + result = WindowsFileHelpers::getResultForLastError(); + + return result; +} + + //============================================================================== void MemoryMappedFile::openInternal (const File& file, AccessMode mode) { diff --git a/Subtrees/beast/modules/beast_core/streams/beast_InputStream.cpp b/Subtrees/beast/modules/beast_core/streams/beast_InputStream.cpp index ebefef3171..ac47e96991 100644 --- a/Subtrees/beast/modules/beast_core/streams/beast_InputStream.cpp +++ b/Subtrees/beast/modules/beast_core/streams/beast_InputStream.cpp @@ -65,6 +65,8 @@ short InputStream::readShortBigEndian() int InputStream::readInt() { + static_bassert (sizeof (int) == 4); + char temp[4]; if (read (temp, 4) == 4) @@ -73,6 +75,16 @@ int InputStream::readInt() return 0; } +int32 InputStream::readInt32() +{ + char temp[4]; + + if (read (temp, 4) == 4) + return (int32) ByteOrder::littleEndianInt (temp); + + return 0; +} + int InputStream::readIntBigEndian() { char temp[4]; @@ -83,6 +95,16 @@ int InputStream::readIntBigEndian() return 0; } +int32 InputStream::readInt32BigEndian() +{ + char temp[4]; + + if (read (temp, 4) == 4) + return (int32) ByteOrder::bigEndianInt (temp); + + return 0; +} + int InputStream::readCompressedInt() { const uint8 sizeByte = (uint8) readByte(); @@ -229,3 +251,71 @@ void InputStream::skipNextBytes (int64 numBytesToSkip) numBytesToSkip -= read (temp, (int) bmin (numBytesToSkip, (int64) skipBufferSize)); } } + +//------------------------------------------------------------------------------ + +// Unfortunately, putting these in the header causes duplicate +// definition linker errors, even with the inline keyword! 
+ +template <> +char InputStream::readType () { return readByte (); } + +template <> +short InputStream::readType () { return readShort (); } + +template <> +int32 InputStream::readType () { return readInt32 (); } + +template <> +int64 InputStream::readType () { return readInt64 (); } + +template <> +unsigned char InputStream::readType () { return static_cast (readByte ()); } + +template <> +unsigned short InputStream::readType () { return static_cast (readShort ()); } + +template <> +uint32 InputStream::readType () { return static_cast (readInt32 ()); } + +template <> +uint64 InputStream::readType () { return static_cast (readInt64 ()); } + +template <> +float InputStream::readType () { return readFloat (); } + +template <> +double InputStream::readType () { return readDouble (); } + +//------------------------------------------------------------------------------ + +template <> +char InputStream::readTypeBigEndian () { return readByte (); } + +template <> +short InputStream::readTypeBigEndian () { return readShortBigEndian (); } + +template <> +int32 InputStream::readTypeBigEndian () { return readInt32BigEndian (); } + +template <> +int64 InputStream::readTypeBigEndian () { return readInt64BigEndian (); } + +template <> +unsigned char InputStream::readTypeBigEndian () { return static_cast (readByte ()); } + +template <> +unsigned short InputStream::readTypeBigEndian () { return static_cast (readShortBigEndian ()); } + +template <> +uint32 InputStream::readTypeBigEndian () { return static_cast (readInt32BigEndian ()); } + +template <> +uint64 InputStream::readTypeBigEndian () { return static_cast (readInt64BigEndian ()); } + +template <> +float InputStream::readTypeBigEndian () { return readFloatBigEndian (); } + +template <> +double InputStream::readTypeBigEndian () { return readDoubleBigEndian (); } + diff --git a/Subtrees/beast/modules/beast_core/streams/beast_InputStream.h b/Subtrees/beast/modules/beast_core/streams/beast_InputStream.h index 
7d7e643234..081a7b037b 100644 --- a/Subtrees/beast/modules/beast_core/streams/beast_InputStream.h +++ b/Subtrees/beast/modules/beast_core/streams/beast_InputStream.h @@ -92,7 +92,7 @@ public: /** Reads a boolean from the stream. - The bool is encoded as a single byte - 1 for true, 0 for false. + The bool is encoded as a single byte - non-zero for true, 0 for false. If the stream is exhausted, this will return false. @@ -111,10 +111,13 @@ public: */ virtual short readShort(); + // VFALCO TODO Implement these functions + //virtual int16 readInt16 (); + //virtual uint16 readUInt16 (); + /** Reads two bytes from the stream as a little-endian 16-bit value. - If the next two bytes read are byte1 and byte2, this returns - (byte2 | (byte1 << 8)). + If the next two bytes read are byte1 and byte2, this returns (byte1 | (byte2 << 8)). If the stream is exhausted partway through reading the bytes, this will return zero. @@ -131,6 +134,13 @@ public: @see OutputStream::writeInt, readIntBigEndian */ + virtual int32 readInt32(); + + // VFALCO TODO Implement these functions + //virtual int16 readInt16BigEndian (); + //virtual uint16 readUInt16BigEndian (); + + // DEPRECATED, assumes sizeof(int) == 4! virtual int readInt(); /** Reads four bytes from the stream as a big-endian 32-bit value. @@ -142,6 +152,9 @@ public: @see OutputStream::writeIntBigEndian, readInt */ + virtual int32 readInt32BigEndian(); + + // DEPRECATED, assumes sizeof(int) == 4! virtual int readIntBigEndian(); /** Reads eight bytes from the stream as a little-endian 64-bit value. @@ -216,6 +229,49 @@ public: */ virtual int readCompressedInt(); + /** Reads a type using a template specialization. + + This is useful when doing template meta-programming. + */ + template + T readType (); + + /** Reads a type using a template specialization. + + The variable is passed as a parameter so that the template type + can be deduced. + + This is useful when doing template meta-programming. 
+ */ + template + void readTypeInto (T* p) + { + *p = readType (); + } + + /** Reads a type from a big endian stream using a template specialization. + + The raw encoding of the type is read from the stream as a big-endian value + where applicable. + + This is useful when doing template meta-programming. + */ + template + T readTypeBigEndian (); + + /** Reads a type using a template specialization. + + The variable is passed as a parameter so that the template type + can be deduced. + + This is useful when doing template meta-programming. + */ + template + void readTypeBigEndianInto (T* p) + { + *p = readTypeBigEndian (); + } + //============================================================================== /** Reads a UTF-8 string from the stream, up to the next linefeed or carriage return. @@ -289,4 +345,4 @@ protected: InputStream() noexcept {} }; -#endif // BEAST_INPUTSTREAM_BEASTHEADER +#endif diff --git a/Subtrees/beast/modules/beast_core/streams/beast_MemoryInputStream.cpp b/Subtrees/beast/modules/beast_core/streams/beast_MemoryInputStream.cpp index 59c6078562..2cdd4198a2 100644 --- a/Subtrees/beast/modules/beast_core/streams/beast_MemoryInputStream.cpp +++ b/Subtrees/beast/modules/beast_core/streams/beast_MemoryInputStream.cpp @@ -92,7 +92,7 @@ int64 MemoryInputStream::getPosition() class MemoryStreamTests : public UnitTest { public: - MemoryStreamTests() : UnitTest ("MemoryStream") { } + MemoryStreamTests() : UnitTest ("MemoryStream", "beast") { } void runTest() { @@ -148,6 +148,4 @@ public: } }; -#if BEAST_UNIT_TESTS static MemoryStreamTests memoryStreamTests; -#endif diff --git a/Subtrees/beast/modules/beast_core/streams/beast_MemoryOutputStream.cpp b/Subtrees/beast/modules/beast_core/streams/beast_MemoryOutputStream.cpp index 0505920614..8df895da1b 100644 --- a/Subtrees/beast/modules/beast_core/streams/beast_MemoryOutputStream.cpp +++ b/Subtrees/beast/modules/beast_core/streams/beast_MemoryOutputStream.cpp @@ -22,23 +22,28 @@ 
//============================================================================== MemoryOutputStream::MemoryOutputStream (const size_t initialSize) - : data (internalBlock), - position (0), - size (0) + : blockToUse (&internalBlock), externalData (nullptr), + position (0), size (0), availableSize (0) { internalBlock.setSize (initialSize, false); } MemoryOutputStream::MemoryOutputStream (MemoryBlock& memoryBlockToWriteTo, const bool appendToExistingBlockContent) - : data (memoryBlockToWriteTo), - position (0), - size (0) + : blockToUse (&memoryBlockToWriteTo), externalData (nullptr), + position (0), size (0), availableSize (0) { if (appendToExistingBlockContent) position = size = memoryBlockToWriteTo.getSize(); } +MemoryOutputStream::MemoryOutputStream (void* destBuffer, size_t destBufferSize) + : blockToUse (nullptr), externalData (destBuffer), + position (0), size (0), availableSize (destBufferSize) +{ + bassert (externalData != nullptr); // This must be a valid pointer. +} + MemoryOutputStream::~MemoryOutputStream() { trimExternalBlockSize(); @@ -51,13 +56,14 @@ void MemoryOutputStream::flush() void MemoryOutputStream::trimExternalBlockSize() { - if (&data != &internalBlock) - data.setSize (size, false); + if (blockToUse != &internalBlock && blockToUse != nullptr) + blockToUse->setSize (size, false); } void MemoryOutputStream::preallocate (const size_t bytesToPreallocate) { - data.ensureSize (bytesToPreallocate + 1); + if (blockToUse != nullptr) + blockToUse->ensureSize (bytesToPreallocate + 1); } void MemoryOutputStream::reset() noexcept @@ -71,10 +77,24 @@ char* MemoryOutputStream::prepareToWrite (size_t numBytes) bassert ((ssize_t) numBytes >= 0); size_t storageNeeded = position + numBytes; - if (storageNeeded >= data.getSize()) - data.ensureSize ((storageNeeded + bmin (storageNeeded / 2, (size_t) (1024 * 1024)) + 32) & ~31u); + char* data; - char* const writePointer = static_cast (data.getData()) + position; + if (blockToUse != nullptr) + { + if (storageNeeded 
>= blockToUse->getSize()) + blockToUse->ensureSize ((storageNeeded + bmin (storageNeeded / 2, (size_t) (1024 * 1024)) + 32) & ~31u); + + data = static_cast (blockToUse->getData()); + } + else + { + if (storageNeeded > availableSize) + return nullptr; + + data = static_cast (externalData); + } + + char* const writePointer = data + position; position += numBytes; size = bmax (size, position); return writePointer; @@ -82,23 +102,43 @@ char* MemoryOutputStream::prepareToWrite (size_t numBytes) bool MemoryOutputStream::write (const void* const buffer, size_t howMany) { - bassert (buffer != nullptr && ((ssize_t) howMany) >= 0); + bassert (buffer != nullptr); - if (howMany > 0) - memcpy (prepareToWrite (howMany), buffer, howMany); + if (howMany == 0) + return true; - return true; + if (char* dest = prepareToWrite (howMany)) + { + memcpy (dest, buffer, howMany); + return true; + } + + return false; } -void MemoryOutputStream::writeRepeatedByte (uint8 byte, size_t howMany) +bool MemoryOutputStream::writeRepeatedByte (uint8 byte, size_t howMany) { - if (howMany > 0) - memset (prepareToWrite (howMany), byte, howMany); + if (howMany == 0) + return true; + + if (char* dest = prepareToWrite (howMany)) + { + memset (dest, byte, howMany); + return true; + } + + return false; } -void MemoryOutputStream::appendUTF8Char (beast_wchar c) +bool MemoryOutputStream::appendUTF8Char (beast_wchar c) { - CharPointer_UTF8 (prepareToWrite (CharPointer_UTF8::getBytesRequiredFor (c))).write (c); + if (char* dest = prepareToWrite (CharPointer_UTF8::getBytesRequiredFor (c))) + { + CharPointer_UTF8 (dest).write (c); + return true; + } + + return false; } MemoryBlock MemoryOutputStream::getMemoryBlock() const @@ -108,10 +148,13 @@ MemoryBlock MemoryOutputStream::getMemoryBlock() const const void* MemoryOutputStream::getData() const noexcept { - if (data.getSize() > size) - static_cast (data.getData()) [size] = 0; + if (blockToUse == nullptr) + return externalData; - return data.getData(); + if 
(blockToUse->getSize() > size) + static_cast (blockToUse->getData()) [size] = 0; + + return blockToUse->getData(); } bool MemoryOutputStream::setPosition (int64 newPosition) @@ -137,7 +180,8 @@ int MemoryOutputStream::writeFromInputStream (InputStream& source, int64 maxNumB if (maxNumBytesToWrite > availableData) maxNumBytesToWrite = availableData; - preallocate (data.getSize() + (size_t) maxNumBytesToWrite); + if (blockToUse != nullptr) + preallocate (blockToUse->getSize() + (size_t) maxNumBytesToWrite); } return OutputStream::writeFromInputStream (source, maxNumBytesToWrite); @@ -162,4 +206,4 @@ OutputStream& BEAST_CALLTYPE operator<< (OutputStream& stream, const MemoryOutpu stream.write (streamToRead.getData(), dataSize); return stream; -} +} \ No newline at end of file diff --git a/Subtrees/beast/modules/beast_core/streams/beast_MemoryOutputStream.h b/Subtrees/beast/modules/beast_core/streams/beast_MemoryOutputStream.h index 1413dcbf3d..be5fd04f28 100644 --- a/Subtrees/beast/modules/beast_core/streams/beast_MemoryOutputStream.h +++ b/Subtrees/beast/modules/beast_core/streams/beast_MemoryOutputStream.h @@ -28,7 +28,6 @@ #include "../memory/beast_MemoryBlock.h" #include "../memory/beast_ScopedPointer.h" - //============================================================================== /** Writes data to an internal memory buffer, which grows as required. @@ -36,14 +35,20 @@ The data that was written into the stream can then be accessed later as a contiguous block of memory. */ -class BEAST_API MemoryOutputStream +//============================================================================== +/** + Writes data to an internal memory buffer, which grows as required. + + The data that was written into the stream can then be accessed later as + a contiguous block of memory. 
+*/ +class BEAST_API MemoryOutputStream : public OutputStream , LeakChecked { public: //============================================================================== /** Creates an empty memory stream, ready to be written into. - @param initialSize the intial amount of capacity to allocate for writing into */ MemoryOutputStream (size_t initialSize = 256); @@ -63,6 +68,14 @@ public: MemoryOutputStream (MemoryBlock& memoryBlockToWriteTo, bool appendToExistingBlockContent); + /** Creates a MemoryOutputStream that will write into a user-supplied, fixed-size + block of memory. + + When using this mode, the stream will write directly into this memory area until + it's full, at which point write operations will fail. + */ + MemoryOutputStream (void* destBuffer, size_t destBufferSize); + /** Destructor. This will free any data that was written to it. */ @@ -88,7 +101,7 @@ public: void preallocate (size_t bytesToPreallocate); /** Appends the utf-8 bytes for a unicode character */ - void appendUTF8Char (beast_wchar character); + bool appendUTF8Char (beast_wchar character); /** Returns a String created from the (UTF8) data that has been written to the stream. 
*/ String toUTF8() const; @@ -108,24 +121,24 @@ public: */ void flush(); - bool write (const void* buffer, size_t howMany); - int64 getPosition() { return position; } - bool setPosition (int64 newPosition); - int writeFromInputStream (InputStream& source, int64 maxNumBytesToWrite); - void writeRepeatedByte (uint8 byte, size_t numTimesToRepeat); + bool write (const void*, size_t) override; + int64 getPosition() override { return position; } + bool setPosition (int64) override; + int writeFromInputStream (InputStream&, int64 maxNumBytesToWrite) override; + bool writeRepeatedByte (uint8 byte, size_t numTimesToRepeat) override; private: - //============================================================================== - MemoryBlock& data; - MemoryBlock internalBlock; - size_t position, size; - void trimExternalBlockSize(); char* prepareToWrite (size_t); + + //============================================================================== + MemoryBlock* const blockToUse; + MemoryBlock internalBlock; + void* externalData; + size_t position, size, availableSize; }; /** Copies all the data that has been written to a MemoryOutputStream into another stream. */ OutputStream& BEAST_CALLTYPE operator<< (OutputStream& stream, const MemoryOutputStream& streamToRead); - -#endif // BEAST_MEMORYOUTPUTSTREAM_BEASTHEADER +#endif \ No newline at end of file diff --git a/Subtrees/beast/modules/beast_core/streams/beast_OutputStream.cpp b/Subtrees/beast/modules/beast_core/streams/beast_OutputStream.cpp index 5089153779..614b32e1db 100644 --- a/Subtrees/beast/modules/beast_core/streams/beast_OutputStream.cpp +++ b/Subtrees/beast/modules/beast_core/streams/beast_OutputStream.cpp @@ -61,48 +61,69 @@ OutputStream::~OutputStream() } //============================================================================== -void OutputStream::writeBool (const bool b) +bool OutputStream::writeBool (const bool b) { - writeByte (b ? (char) 1 - : (char) 0); + return writeByte (b ? 
(char) 1 + : (char) 0); } -void OutputStream::writeByte (char byte) +bool OutputStream::writeByte (char byte) { - write (&byte, 1); + return write (&byte, 1); } -void OutputStream::writeRepeatedByte (uint8 byte, size_t numTimesToRepeat) +bool OutputStream::writeRepeatedByte (uint8 byte, size_t numTimesToRepeat) { for (size_t i = 0; i < numTimesToRepeat; ++i) - writeByte ((char) byte); + if (! writeByte ((char) byte)) + return false; + + return true; } -void OutputStream::writeShort (short value) +bool OutputStream::writeShort (short value) { const unsigned short v = ByteOrder::swapIfBigEndian ((unsigned short) value); - write (&v, 2); + return write (&v, 2); } -void OutputStream::writeShortBigEndian (short value) +bool OutputStream::writeShortBigEndian (short value) { const unsigned short v = ByteOrder::swapIfLittleEndian ((unsigned short) value); - write (&v, 2); + return write (&v, 2); } -void OutputStream::writeInt (int value) +bool OutputStream::writeInt32 (int32 value) { + static_bassert (sizeof (int32) == 4); + + const unsigned int v = ByteOrder::swapIfBigEndian ((uint32) value); + return write (&v, 4); +} + +bool OutputStream::writeInt (int value) +{ + static_bassert (sizeof (int) == 4); + const unsigned int v = ByteOrder::swapIfBigEndian ((unsigned int) value); - write (&v, 4); + return write (&v, 4); } -void OutputStream::writeIntBigEndian (int value) +bool OutputStream::writeInt32BigEndian (int value) { - const unsigned int v = ByteOrder::swapIfLittleEndian ((unsigned int) value); - write (&v, 4); + static_bassert (sizeof (int32) == 4); + const uint32 v = ByteOrder::swapIfLittleEndian ((uint32) value); + return write (&v, 4); } -void OutputStream::writeCompressedInt (int value) +bool OutputStream::writeIntBigEndian (int value) +{ + static_bassert (sizeof (int) == 4); + const unsigned int v = ByteOrder::swapIfLittleEndian ((unsigned int) value); + return write (&v, 4); +} + +bool OutputStream::writeCompressedInt (int value) { unsigned int un = (value < 0) 
? (unsigned int) -value : (unsigned int) value; @@ -121,60 +142,60 @@ void OutputStream::writeCompressedInt (int value) if (value < 0) data[0] |= 0x80; - write (data, num + 1); + return write (data, num + 1); } -void OutputStream::writeInt64 (int64 value) +bool OutputStream::writeInt64 (int64 value) { const uint64 v = ByteOrder::swapIfBigEndian ((uint64) value); - write (&v, 8); + return write (&v, 8); } -void OutputStream::writeInt64BigEndian (int64 value) +bool OutputStream::writeInt64BigEndian (int64 value) { const uint64 v = ByteOrder::swapIfLittleEndian ((uint64) value); - write (&v, 8); + return write (&v, 8); } -void OutputStream::writeFloat (float value) +bool OutputStream::writeFloat (float value) { union { int asInt; float asFloat; } n; n.asFloat = value; - writeInt (n.asInt); + return writeInt (n.asInt); } -void OutputStream::writeFloatBigEndian (float value) +bool OutputStream::writeFloatBigEndian (float value) { union { int asInt; float asFloat; } n; n.asFloat = value; - writeIntBigEndian (n.asInt); + return writeIntBigEndian (n.asInt); } -void OutputStream::writeDouble (double value) +bool OutputStream::writeDouble (double value) { union { int64 asInt; double asDouble; } n; n.asDouble = value; - writeInt64 (n.asInt); + return writeInt64 (n.asInt); } -void OutputStream::writeDoubleBigEndian (double value) +bool OutputStream::writeDoubleBigEndian (double value) { union { int64 asInt; double asDouble; } n; n.asDouble = value; - writeInt64BigEndian (n.asInt); + return writeInt64BigEndian (n.asInt); } -void OutputStream::writeString (const String& text) +bool OutputStream::writeString (const String& text) { // (This avoids using toUTF8() to prevent the memory bloat that it would leave behind // if lots of large, persistent strings were to be written to streams). 
const size_t numBytes = text.getNumBytesAsUTF8() + 1; HeapBlock temp (numBytes); text.copyToUTF8 (temp, numBytes); - write (temp, numBytes); + return write (temp, numBytes); } -void OutputStream::writeText (const String& text, const bool asUTF16, +bool OutputStream::writeText (const String& text, const bool asUTF16, const bool writeUTF16ByteOrderMark) { if (asUTF16) @@ -196,7 +217,9 @@ void OutputStream::writeText (const String& text, const bool asUTF16, writeShort ((short) '\r'); lastCharWasReturn = (c == L'\r'); - writeShort ((short) c); + + if (! writeShort ((short) c)) + return false; } } else @@ -209,9 +232,12 @@ void OutputStream::writeText (const String& text, const bool asUTF16, if (*t == '\n') { if (t > src) - write (src, (int) (t - src)); + if (! write (src, (int) (t - src))) + return false; + + if (! write ("\r\n", 2)) + return false; - write ("\r\n", 2); src = t + 1; } else if (*t == '\r') @@ -222,7 +248,8 @@ void OutputStream::writeText (const String& text, const bool asUTF16, else if (*t == 0) { if (t > src) - write (src, (int) (t - src)); + if (! write (src, (int) (t - src))) + return false; break; } @@ -230,6 +257,8 @@ void OutputStream::writeText (const String& text, const bool asUTF16, ++t; } } + + return true; } int OutputStream::writeFromInputStream (InputStream& source, int64 numBytesToWrite) @@ -318,3 +347,70 @@ BEAST_API OutputStream& BEAST_CALLTYPE operator<< (OutputStream& stream, const N { return stream << stream.getNewLineString(); } + +//------------------------------------------------------------------------------ + +// Unfortunately, putting these in the header causes duplicate +// definition linker errors, even with the inline keyword! 
+ +template <> +BEAST_API bool OutputStream::writeType (char v) { return writeByte (v); } + +template <> +BEAST_API bool OutputStream::writeType (short v) { return writeShort (v); } + +template <> +BEAST_API bool OutputStream::writeType (int32 v) { return writeInt32 (v); } + +template <> +BEAST_API bool OutputStream::writeType (int64 v) { return writeInt64 (v); } + +template <> +BEAST_API bool OutputStream::writeType (unsigned char v) { return writeByte (static_cast (v)); } + +template <> +BEAST_API bool OutputStream::writeType (unsigned short v) { return writeShort (static_cast (v)); } + +template <> +BEAST_API bool OutputStream::writeType (uint32 v) { return writeInt32 (static_cast (v)); } + +template <> +BEAST_API bool OutputStream::writeType (uint64 v) { return writeInt64 (static_cast (v)); } + +template <> +BEAST_API bool OutputStream::writeType (float v) { return writeFloat (v); } + +template <> +BEAST_API bool OutputStream::writeType (double v) { return writeDouble (v); } + +//------------------------------------------------------------------------------ + +template <> +BEAST_API bool OutputStream::writeTypeBigEndian (char v) { return writeByte (v); } + +template <> +BEAST_API bool OutputStream::writeTypeBigEndian (short v) { return writeShortBigEndian (v); } + +template <> +BEAST_API bool OutputStream::writeTypeBigEndian (int32 v) { return writeInt32BigEndian (v); } + +template <> +BEAST_API bool OutputStream::writeTypeBigEndian (int64 v) { return writeInt64BigEndian (v); } + +template <> +BEAST_API bool OutputStream::writeTypeBigEndian (unsigned char v) { return writeByte (static_cast (v)); } + +template <> +BEAST_API bool OutputStream::writeTypeBigEndian (unsigned short v) { return writeShortBigEndian (static_cast (v)); } + +template <> +BEAST_API bool OutputStream::writeTypeBigEndian (uint32 v) { return writeInt32BigEndian (static_cast (v)); } + +template <> +BEAST_API bool OutputStream::writeTypeBigEndian (uint64 v) { return writeInt64BigEndian 
(static_cast (v)); } + +template <> +BEAST_API bool OutputStream::writeTypeBigEndian (float v) { return writeFloatBigEndian (v); } + +template <> +BEAST_API bool OutputStream::writeTypeBigEndian (double v) { return writeDoubleBigEndian (v); } diff --git a/Subtrees/beast/modules/beast_core/streams/beast_OutputStream.h b/Subtrees/beast/modules/beast_core/streams/beast_OutputStream.h index 9365041ba8..b536c48a57 100644 --- a/Subtrees/beast/modules/beast_core/streams/beast_OutputStream.h +++ b/Subtrees/beast/modules/beast_core/streams/beast_OutputStream.h @@ -40,9 +40,7 @@ class File; @see InputStream, MemoryOutputStream, FileOutputStream */ -class BEAST_API OutputStream - : public Uncopyable - , LeakChecked +class BEAST_API OutputStream : public Uncopyable { protected: //============================================================================== @@ -92,75 +90,111 @@ public: //============================================================================== /** Writes a single byte to the stream. - + @returns false if the write operation fails for some reason @see InputStream::readByte */ - virtual void writeByte (char byte); + virtual bool writeByte (char byte); /** Writes a boolean to the stream as a single byte. This is encoded as a binary byte (not as text) with a value of 1 or 0. + @returns false if the write operation fails for some reason @see InputStream::readBool */ - virtual void writeBool (bool boolValue); + virtual bool writeBool (bool boolValue); /** Writes a 16-bit integer to the stream in a little-endian byte order. This will write two bytes to the stream: (value & 0xff), then (value >> 8). + @returns false if the write operation fails for some reason @see InputStream::readShort */ - virtual void writeShort (short value); + virtual bool writeShort (short value); /** Writes a 16-bit integer to the stream in a big-endian byte order. This will write two bytes to the stream: (value >> 8), then (value & 0xff). 
+ @returns false if the write operation fails for some reason @see InputStream::readShortBigEndian */ - virtual void writeShortBigEndian (short value); + virtual bool writeShortBigEndian (short value); /** Writes a 32-bit integer to the stream in a little-endian byte order. + @returns false if the write operation fails for some reason @see InputStream::readInt */ - virtual void writeInt (int value); + virtual bool writeInt32 (int32 value); + + // DEPRECATED, assumes sizeof (int) == 4! + virtual bool writeInt (int value); /** Writes a 32-bit integer to the stream in a big-endian byte order. + @returns false if the write operation fails for some reason @see InputStream::readIntBigEndian */ - virtual void writeIntBigEndian (int value); + virtual bool writeInt32BigEndian (int32 value); + + // DEPRECATED, assumes sizeof (int) == 4! + virtual bool writeIntBigEndian (int value); /** Writes a 64-bit integer to the stream in a little-endian byte order. + @returns false if the write operation fails for some reason @see InputStream::readInt64 */ - virtual void writeInt64 (int64 value); + virtual bool writeInt64 (int64 value); /** Writes a 64-bit integer to the stream in a big-endian byte order. + @returns false if the write operation fails for some reason @see InputStream::readInt64BigEndian */ - virtual void writeInt64BigEndian (int64 value); + virtual bool writeInt64BigEndian (int64 value); /** Writes a 32-bit floating point value to the stream in a binary format. The binary 32-bit encoding of the float is written as a little-endian int. + @returns false if the write operation fails for some reason @see InputStream::readFloat */ - virtual void writeFloat (float value); + virtual bool writeFloat (float value); /** Writes a 32-bit floating point value to the stream in a binary format. The binary 32-bit encoding of the float is written as a big-endian int. 
+ @returns false if the write operation fails for some reason @see InputStream::readFloatBigEndian */ - virtual void writeFloatBigEndian (float value); + virtual bool writeFloatBigEndian (float value); /** Writes a 64-bit floating point value to the stream in a binary format. The eight raw bytes of the double value are written out as a little-endian 64-bit int. + @returns false if the write operation fails for some reason @see InputStream::readDouble */ - virtual void writeDouble (double value); + virtual bool writeDouble (double value); /** Writes a 64-bit floating point value to the stream in a binary format. The eight raw bytes of the double value are written out as a big-endian 64-bit int. @see InputStream::readDoubleBigEndian + @returns false if the write operation fails for some reason */ - virtual void writeDoubleBigEndian (double value); + virtual bool writeDoubleBigEndian (double value); - /** Writes a byte to the output stream a given number of times. */ - virtual void writeRepeatedByte (uint8 byte, size_t numTimesToRepeat); + /** Write a type using a template specialization. + + This is useful when doing template meta-programming. + */ + template + bool writeType (T value); + + /** Write a type using a template specialization. + + The raw encoding of the type is written to the stream as a big-endian value + where applicable. + + This is useful when doing template meta-programming. + */ + template + bool writeTypeBigEndian (T value); + + /** Writes a byte to the output stream a given number of times. + @returns false if the write operation fails for some reason + */ + virtual bool writeRepeatedByte (uint8 byte, size_t numTimesToRepeat); /** Writes a condensed binary encoding of a 32-bit integer. @@ -170,9 +204,10 @@ public: The format used is: number of significant bytes + up to 4 bytes in little-endian order. 
+ @returns false if the write operation fails for some reason @see InputStream::readCompressedInt */ - virtual void writeCompressedInt (int value); + virtual bool writeCompressedInt (int value); /** Stores a string in the stream in a binary format. @@ -184,9 +219,10 @@ public: For appending text to a file, instead use writeText, or operator<< + @returns false if the write operation fails for some reason @see InputStream::readString, writeText, operator<< */ - virtual void writeString (const String& text); + virtual bool writeString (const String& text); /** Writes a string of text to the stream. @@ -195,8 +231,9 @@ public: of a file). The method also replaces '\\n' characters in the text with '\\r\\n'. + @returns false if the write operation fails for some reason */ - virtual void writeText (const String& text, + virtual bool writeText (const String& text, bool asUTF16, bool writeUTF16ByteOrderMark); @@ -206,6 +243,7 @@ public: @param maxNumBytesToWrite the number of bytes to read from the stream (if this is less than zero, it will keep reading until the input is exhausted) + @returns the number of bytes written */ virtual int writeFromInputStream (InputStream& source, int64 maxNumBytesToWrite); @@ -258,5 +296,4 @@ BEAST_API OutputStream& BEAST_CALLTYPE operator<< (OutputStream& stream, InputSt */ BEAST_API OutputStream& BEAST_CALLTYPE operator<< (OutputStream& stream, const NewLine&); - -#endif // BEAST_OUTPUTSTREAM_BEASTHEADER +#endif diff --git a/Subtrees/beast/modules/beast_core/text/beast_String.cpp b/Subtrees/beast/modules/beast_core/text/beast_String.cpp index d8771bb31f..55ad6d5929 100644 --- a/Subtrees/beast/modules/beast_core/text/beast_String.cpp +++ b/Subtrees/beast/modules/beast_core/text/beast_String.cpp @@ -2078,7 +2078,7 @@ String String::fromUTF8 (const char* const buffer, int bufferSizeBytes) class StringTests : public UnitTest { public: - StringTests() : UnitTest ("String") { } + StringTests() : UnitTest ("String", "beast") { } template struct 
TestUTFConversion @@ -2402,6 +2402,4 @@ public: } }; -#if BEAST_UNIT_TESTS static StringTests stringTests; -#endif diff --git a/Subtrees/beast/modules/beast_core/text/beast_TextDiff.cpp b/Subtrees/beast/modules/beast_core/text/beast_TextDiff.cpp index 7a1f2a894c..6da5b587bd 100644 --- a/Subtrees/beast/modules/beast_core/text/beast_TextDiff.cpp +++ b/Subtrees/beast/modules/beast_core/text/beast_TextDiff.cpp @@ -177,7 +177,7 @@ String TextDiff::Change::appliedTo (const String& text) const noexcept class DiffTests : public UnitTest { public: - DiffTests() : UnitTest ("TextDiff") {} + DiffTests() : UnitTest ("TextDiff", "beast") {} static String createString() { @@ -229,6 +229,4 @@ public: } }; -#if BEAST_UNIT_TESTS static DiffTests diffTests; -#endif diff --git a/Subtrees/beast/modules/beast_core/threads/beast_ChildProcess.cpp b/Subtrees/beast/modules/beast_core/threads/beast_ChildProcess.cpp index 4e809a7197..0c08aa82d4 100644 --- a/Subtrees/beast/modules/beast_core/threads/beast_ChildProcess.cpp +++ b/Subtrees/beast/modules/beast_core/threads/beast_ChildProcess.cpp @@ -61,7 +61,7 @@ String ChildProcess::readAllProcessOutput() class ChildProcessTests : public UnitTest { public: - ChildProcessTests() : UnitTest ("ChildProcess") {} + ChildProcessTests() : UnitTest ("ChildProcess", "beast") {} void runTest() { @@ -82,6 +82,4 @@ public: } }; -#if BEAST_UNIT_TESTS static ChildProcessTests childProcessTests; -#endif diff --git a/Subtrees/beast/modules/beast_core/threads/beast_Thread.cpp b/Subtrees/beast/modules/beast_core/threads/beast_Thread.cpp index 999f35d444..7685cfa00a 100644 --- a/Subtrees/beast/modules/beast_core/threads/beast_Thread.cpp +++ b/Subtrees/beast/modules/beast_core/threads/beast_Thread.cpp @@ -255,7 +255,7 @@ void SpinLock::enter() const noexcept class AtomicTests : public UnitTest { public: - AtomicTests() : UnitTest ("Atomic") {} + AtomicTests() : UnitTest ("Atomic", "beast") {} void runTest() { @@ -350,6 +350,4 @@ public: }; }; -#if BEAST_UNIT_TESTS 
static AtomicTests atomicTests; -#endif diff --git a/Subtrees/beast/modules/beast_core/zip/beast_GZIPCompressorOutputStream.cpp b/Subtrees/beast/modules/beast_core/zip/beast_GZIPCompressorOutputStream.cpp index d8febfa5ad..7d66da47a0 100644 --- a/Subtrees/beast/modules/beast_core/zip/beast_GZIPCompressorOutputStream.cpp +++ b/Subtrees/beast/modules/beast_core/zip/beast_GZIPCompressorOutputStream.cpp @@ -161,7 +161,7 @@ bool GZIPCompressorOutputStream::setPosition (int64 /*newPosition*/) class GZIPTests : public UnitTest { public: - GZIPTests() : UnitTest ("GZIP") {} + GZIPTests() : UnitTest ("GZIP", "beast") {} void runTest() { @@ -205,6 +205,4 @@ public: } }; -#if BEAST_UNIT_TESTS static GZIPTests gzipTests; -#endif diff --git a/Subtrees/beast/modules/beast_core/zip/beast_GZIPCompressorOutputStream.h b/Subtrees/beast/modules/beast_core/zip/beast_GZIPCompressorOutputStream.h index c083afc45f..d13e72802a 100644 --- a/Subtrees/beast/modules/beast_core/zip/beast_GZIPCompressorOutputStream.h +++ b/Subtrees/beast/modules/beast_core/zip/beast_GZIPCompressorOutputStream.h @@ -80,9 +80,9 @@ public: */ void flush(); - int64 getPosition(); - bool setPosition (int64 newPosition); - bool write (const void* destBuffer, size_t howMany); + int64 getPosition() override; + bool setPosition (int64) override; + bool write (const void*, size_t) override; /** These are preset values that can be used for the constructor's windowBits paramter. For more info about this, see the zlib documentation for its windowBits parameter. 
diff --git a/Subtrees/beast/modules/beast_crypto/math/beast_UnsignedInteger.cpp b/Subtrees/beast/modules/beast_crypto/math/beast_UnsignedInteger.cpp index 916e6bad0d..b9a2b5d18f 100644 --- a/Subtrees/beast/modules/beast_crypto/math/beast_UnsignedInteger.cpp +++ b/Subtrees/beast/modules/beast_crypto/math/beast_UnsignedInteger.cpp @@ -20,7 +20,7 @@ class UnsignedIntegerTests : public UnitTest { public: - UnsignedIntegerTests () : UnitTest ("UnsignedInteger") + UnsignedIntegerTests () : UnitTest ("UnsignedInteger", "beast") { } @@ -29,7 +29,7 @@ public: { String s; - s << "UnsignedInteger <" << String(Bytes) << ">"; + s << "Bytes=" << String(Bytes); beginTest (s); @@ -82,6 +82,4 @@ public: private: }; -#if BEAST_UNIT_TESTS static UnsignedIntegerTests unsignedIntegerTests; -#endif diff --git a/Subtrees/beast/modules/beast_crypto/math/beast_UnsignedInteger.h b/Subtrees/beast/modules/beast_crypto/math/beast_UnsignedInteger.h index 9d4f950ea3..fc0fa167cb 100644 --- a/Subtrees/beast/modules/beast_crypto/math/beast_UnsignedInteger.h +++ b/Subtrees/beast/modules/beast_crypto/math/beast_UnsignedInteger.h @@ -28,7 +28,7 @@ @tparam Bytes The number of bytes of storage. */ -template +template class UnsignedInteger : public SafeBool > { public: @@ -76,10 +76,10 @@ public: template UnsignedInteger & operator= (IntegerType value) { - static_bassert (sizeof (Bytes) >= sizeof (IntegerType)); + static_bassert (Bytes >= sizeof (IntegerType)); clear (); value = ByteOrder::swapIfLittleEndian (value); - memcpy (end () - sizeof (value), &value, sizeof (value)); + memcpy (end () - sizeof (value), &value, bmin (Bytes, sizeof (value))); return *this; } @@ -234,28 +234,28 @@ public: */ bool operator< (UnsignedInteger const& other) const noexcept { - return compare (other) == -1; + return compare (other) < 0; } /** Ordered comparison. */ bool operator<= (UnsignedInteger const& other) const noexcept { - return compare (other) != 1; + return compare (other) <= 0; } /** Ordered comparison. 
*/ bool operator> (UnsignedInteger const& other) const noexcept { - return compare (other) == 1; + return compare (other) > 0; } /** Ordered comparison. */ bool operator>= (UnsignedInteger const& other) const noexcept { - return compare (other) != -1; + return compare (other) >= 0; } /** Perform bitwise logical-not. diff --git a/Subtrees/beast/modules/beast_db/beast_db.cpp b/Subtrees/beast/modules/beast_db/beast_db.cpp new file mode 100644 index 0000000000..622c2afdbd --- /dev/null +++ b/Subtrees/beast/modules/beast_db/beast_db.cpp @@ -0,0 +1,31 @@ +//------------------------------------------------------------------------------ +/* + This file is part of Beast: https://github.com/vinniefalco/Beast + Copyright 2013, Vinnie Falco + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include "BeastConfig.h" + +#include "beast_db.h" + +#include "../beast_crypto/beast_crypto.h" + +namespace beast +{ + +#include "keyvalue/beast_KeyvaDB.cpp" + +} diff --git a/Subtrees/beast/modules/beast_db/beast_db.h b/Subtrees/beast/modules/beast_db/beast_db.h new file mode 100644 index 0000000000..1612a178d9 --- /dev/null +++ b/Subtrees/beast/modules/beast_db/beast_db.h @@ -0,0 +1,52 @@ +//------------------------------------------------------------------------------ +/* + This file is part of Beast: https://github.com/vinniefalco/Beast + Copyright 2013, Vinnie Falco + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef BEAST_BEAST_DB_H_INCLUDED +#define BEAST_BEAST_DB_H_INCLUDED + +//------------------------------------------------------------------------------ + +/* If you fail to make sure that all your compile units are building Beast with + the same set of option flags, then there's a risk that different compile + units will treat the classes as having different memory layouts, leading to + very nasty memory corruption errors when they all get linked together. 
+ That's why it's best to always include the BeastConfig.h file before any + beast headers. +*/ +#ifndef BEAST_BEASTCONFIG_H_INCLUDED +# ifdef _MSC_VER +# pragma message ("Have you included your BeastConfig.h file before including the Beast headers?") +# else +# warning "Have you included your BeastConfig.h file before including the Beast headers?" +# endif +#endif + +#include "../beast_core/beast_core.h" +#include "../beast_basics/beast_basics.h" + +//------------------------------------------------------------------------------ + +namespace beast +{ + +#include "keyvalue/beast_KeyvaDB.h" + +} + +#endif diff --git a/Subtrees/beast/modules/beast_db/beast_db.mm b/Subtrees/beast/modules/beast_db/beast_db.mm new file mode 100644 index 0000000000..2ae0b83c82 --- /dev/null +++ b/Subtrees/beast/modules/beast_db/beast_db.mm @@ -0,0 +1,20 @@ +//------------------------------------------------------------------------------ +/* + This file is part of Beast: https://github.com/vinniefalco/Beast + Copyright 2013, Vinnie Falco + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include "beast_db.cpp" diff --git a/Subtrees/beast/modules/beast_db/keyvalue/beast_KeyvaDB.cpp b/Subtrees/beast/modules/beast_db/keyvalue/beast_KeyvaDB.cpp new file mode 100644 index 0000000000..7867292d74 --- /dev/null +++ b/Subtrees/beast/modules/beast_db/keyvalue/beast_KeyvaDB.cpp @@ -0,0 +1,861 @@ +//------------------------------------------------------------------------------ +/* + This file is part of Beast: https://github.com/vinniefalco/Beast + Copyright 2013, Vinnie Falco + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +/* + +TODO + +- Check consistency / range checking on read + +- Cache top level tree nodes + +- Coalesce I/O in RandomAccessFile + +- Delete / file compaction + +*/ + +class KeyvaDBImp : public KeyvaDB +{ +private: + // These are stored in big endian format in the file. + + // A file offset. + typedef int64 FileOffset; + + // Index of a key. + // + // The value is broken up into two parts. The key block index, + // and a 1 based index within the keyblock corresponding to the + // internal key number. + // + typedef int32 KeyIndex; + typedef int32 KeyBlockIndex; + + // Size of a value. 
+ typedef uint32 ByteSize; + +private: + // returns the number of keys in a key block with the specified depth + static int calcKeysAtDepth (int depth) + { + return (1U << depth) - 1; + } + + // returns the number of bytes in a key record + static int calcKeyRecordBytes (int keyBytes) + { + // This depends on the format of a serialized key record + return + sizeof (FileOffset) + + sizeof (ByteSize) + + sizeof (KeyIndex) + + sizeof (KeyIndex) + + keyBytes + ; + } + + // returns the number of bytes in a key block + static int calcKeyBlockBytes (int depth, int keyBytes) + { + return calcKeysAtDepth (depth) * calcKeyRecordBytes (keyBytes); + } + +public: + enum + { + currentVersion = 1 + }; + + + //-------------------------------------------------------------------------- + + struct KeyAddress + { + // 1 based key block number + uint32 blockNumber; + + // 1 based key index within the block, breadth-first left to right + uint32 keyNumber; + }; + + enum + { + // The size of the fixed area at the beginning of the key file. + // This is used to store some housekeeping information like the + // key size and version number. + // + masterHeaderBytes = 1000 + }; + + // The master record is at the beginning of the key file + struct MasterRecord + { + // version number, starting from 1 + int32 version; + + KeyBlockIndex nextKeyBlockIndex; + + void write (OutputStream& stream) + { + stream.writeTypeBigEndian (version); + } + + void read (InputStream& stream) + { + stream.readTypeBigEndianInto (&version); + } + }; + + // Key records are indexed starting at one. + struct KeyRecord : Uncopyable + { + explicit KeyRecord (void* const keyStorage) + : key (keyStorage) + { + } + + // Absolute byte FileOffset in the value file. + FileOffset valFileOffset; + + // Size of the corresponding value, in bytes. + ByteSize valSize; + + // Key record index of left node, or 0. + KeyIndex leftIndex; + + // Key record index of right node, or 0. 
+ KeyIndex rightIndex; + + // Points to keyBytes storage of the key. + void* const key; + }; + + //-------------------------------------------------------------------------- + + // A complete keyblock. The contents of the memory for the key block + // are identical to the format on disk. Therefore it is necessary to + // use the serialization routines to extract or update the key records. + // + class KeyBlock : Uncopyable + { + public: + KeyBlock (int depth, int keyBytes) + : m_depth (depth) + , m_keyBytes (keyBytes) + , m_storage (calcKeyBlockBytes (depth, keyBytes)) + { + } + + void read (InputStream& stream) + { + stream.read (m_storage.getData (), calcKeyBlockBytes (m_depth, m_keyBytes)); + } + + void write (OutputStream& stream) + { + stream.write (m_storage.getData (), calcKeyBlockBytes (m_depth, m_keyBytes)); + } + + void readKeyRecord (KeyRecord* keyRecord, int keyIndex) + { + bassert (keyIndex >=1 && keyIndex <= calcKeysAtDepth (m_depth)); + + size_t const byteOffset = (keyIndex - 1) * calcKeyRecordBytes (m_keyBytes); + + MemoryInputStream stream ( + addBytesToPointer (m_storage.getData (), byteOffset), + calcKeyRecordBytes (m_keyBytes), + false); + + stream.readTypeBigEndianInto (&keyRecord->valFileOffset); + stream.readTypeBigEndianInto (&keyRecord->valSize); + stream.readTypeBigEndianInto (&keyRecord->leftIndex); + stream.readTypeBigEndianInto (&keyRecord->rightIndex); + stream.read (keyRecord->key, m_keyBytes); + } + +#if 0 + void writeKeyRecord (KeyRecord const& keyRecord, int keyIndex) + { + bassert (keyIndex >=1 && keyIndex <= calcKeysAtDepth (m_depth)); + +#if 0 + size_t const byteOffset = (keyIndex - 1) * calcKeyRecordBytes (m_keyBytes); + + MemoryOutputStream stream ( + addBytesToPointer (m_storage.getData (), byteOffset), + calcKeyRecordBytes (m_keyBytes)); + + stream.writeTypeBigEndian (keyRecord.valFileOffset); + stream.writeTypeBigEndian (keyRecord.valSize); + stream.writeTypeBigEndian (keyRecord.leftIndex); + stream.writeTypeBigEndian 
(keyRecord.rightIndex); + stream.write (keyRecord.key, m_keyBytes); +#endif + } +#endif + + private: + int const m_depth; + int const m_keyBytes; + MemoryBlock m_storage; + }; + + //-------------------------------------------------------------------------- + + // Concurrent data + // + struct State + { + RandomAccessFile keyFile; + RandomAccessFile valFile; + MasterRecord masterRecord; + KeyIndex newKeyIndex; + FileOffset valFileSize; + + bool hasKeys () const noexcept + { + return newKeyIndex > 1; + } + }; + + typedef SharedData SharedState; + + //-------------------------------------------------------------------------- + + int const m_keyBytes; + int const m_keyBlockDepth; + SharedState m_state; + HeapBlock m_keyStorage; + + //-------------------------------------------------------------------------- + + KeyvaDBImp (int keyBytes, + int keyBlockDepth, + File keyPath, + File valPath) + : m_keyBytes (keyBytes) + , m_keyBlockDepth (keyBlockDepth) + , m_keyStorage (keyBytes) + { + SharedState::WriteAccess state (m_state); + + openFile (&state->keyFile, keyPath); + + int64 const fileSize = state->keyFile.getFile ().getSize (); + + if (fileSize == 0) + { + // VFALCO TODO Better error handling here + // initialize the key file + Result result = state->keyFile.setPosition (masterHeaderBytes - 1); + if (result.wasOk ()) + { + char byte = 0; + + result = state->keyFile.write (&byte, 1); + + if (result.wasOk ()) + { + state->keyFile.flush (); + } + } + } + + state->newKeyIndex = 1 + (state->keyFile.getFile ().getSize () - masterHeaderBytes) + / calcKeyRecordBytes (m_keyBytes); + + openFile (&state->valFile, valPath); + + state->valFileSize = state->valFile.getFile ().getSize (); + } + + ~KeyvaDBImp () + { + SharedState::WriteAccess state (m_state); + + flushInternal (state); + } + + // Open a file for reading and writing. + // Creates the file if it doesn't exist. 
+ static void openFile (RandomAccessFile* file, File path) + { + Result const result = file->open (path, RandomAccessFile::readWrite); + + if (! result) + { + String s; + s << "KeyvaDB: Couldn't open " << path.getFileName () << " for writing."; + Throw (std::runtime_error (s.toStdString ())); + } + } + + //-------------------------------------------------------------------------- + + Result createMasterRecord (SharedState::WriteAccess& state) + { + MemoryBlock buffer (masterHeaderBytes, true); + + Result result = state->keyFile.setPosition (0); + + if (result.wasOk ()) + { + MasterRecord mr; + + mr.version = 1; + + result = state->keyFile.write (buffer.getData (), buffer.getSize ()); + } + + return result; + } + + //-------------------------------------------------------------------------- + + FileOffset calcKeyRecordOffset (KeyIndex keyIndex) + { + bassert (keyIndex > 0); + + FileOffset const byteOffset = masterHeaderBytes + (keyIndex - 1) * calcKeyRecordBytes (m_keyBytes); + + return byteOffset; + } + + // Read a key record into memory. + // VFALCO TODO Return a Result and do validity checking on all inputs + // + void readKeyRecord (KeyRecord* const keyRecord, + KeyIndex const keyIndex, + SharedState::WriteAccess& state) + { + FileOffset const byteOffset = calcKeyRecordOffset (keyIndex); + + Result result = state->keyFile.setPosition (byteOffset); + + if (result.wasOk ()) + { + MemoryBlock data (calcKeyRecordBytes (m_keyBytes)); + + size_t bytesRead; + + result = state->keyFile.read (data.getData (), calcKeyRecordBytes (m_keyBytes), &bytesRead); + + if (result.wasOk ()) + { + if (bytesRead == calcKeyRecordBytes (m_keyBytes)) + { + MemoryInputStream stream (data, false); + + // This defines the file format! 
+ stream.readTypeBigEndianInto (&keyRecord->valFileOffset); + stream.readTypeBigEndianInto (&keyRecord->valSize); + stream.readTypeBigEndianInto (&keyRecord->leftIndex); + stream.readTypeBigEndianInto (&keyRecord->rightIndex); + + // Grab the key + stream.read (keyRecord->key, m_keyBytes); + } + else + { + result = Result::fail ("KeyvaDB: amountRead != calcKeyRecordBytes()"); + } + } + } + + if (! result.wasOk ()) + { + String s; + s << "KeyvaDB readKeyRecord failed in " << state->keyFile.getFile ().getFileName (); + Throw (std::runtime_error (s.toStdString ())); + } + } + + // Write a key record from memory + void writeKeyRecord (KeyRecord const& keyRecord, + KeyIndex const keyIndex, + SharedState::WriteAccess& state, + bool includingKey) + { + FileOffset const byteOffset = calcKeyRecordOffset (keyIndex); + + int const bytes = calcKeyRecordBytes (m_keyBytes) - (includingKey ? 0 : m_keyBytes); + + // VFALCO TODO Recycle this buffer + MemoryBlock data (bytes); + + { + MemoryOutputStream stream (data, false); + + // This defines the file format! + stream.writeTypeBigEndian (keyRecord.valFileOffset); + stream.writeTypeBigEndian (keyRecord.valSize); + stream.writeTypeBigEndian (keyRecord.leftIndex); + stream.writeTypeBigEndian (keyRecord.rightIndex); + + // Write the key + if (includingKey) + stream.write (keyRecord.key, m_keyBytes); + } + + Result result = state->keyFile.setPosition (byteOffset); + + if (result.wasOk ()) + { + size_t bytesWritten; + + result = state->keyFile.write (data.getData (), bytes, &bytesWritten); + + if (result.wasOk ()) + { + if (bytesWritten != bytes) + { + result = Result::fail ("KeyvaDB: bytesWritten != bytes"); + } + } + } + + if (!result.wasOk ()) + { + String s; + s << "KeyvaDB: writeKeyRecord failed in " << state->keyFile.getFile ().getFileName (); + Throw (std::runtime_error (s.toStdString ())); + } + } + + // Append a value to the value file. 
+ // VFALCO TODO return a Result + void writeValue (void const* const value, ByteSize valueBytes, SharedState::WriteAccess& state) + { + Result result = state->valFile.setPosition (state->valFileSize); + + if (result.wasOk ()) + { + size_t bytesWritten; + + result = state->valFile.write (value, valueBytes, &bytesWritten); + + if (result.wasOk ()) + { + if (bytesWritten == valueBytes) + { + state->valFileSize += valueBytes; + } + else + { + result = Result::fail ("KeyvaDB: bytesWritten != valueBytes"); + } + } + } + + if (! result.wasOk ()) + { + String s; + s << "KeyvaDB: writeValue failed in " << state->valFile.getFile ().getFileName (); + Throw (std::runtime_error (s.toStdString ())); + } + } + + //-------------------------------------------------------------------------- + + struct FindResult : Uncopyable + { + FindResult (void* const keyStorage) + : keyRecord (keyStorage) + { + } + + int compare; // result of the last comparison + KeyIndex keyIndex; // index we looked at last + //KeyBlock keyBlock; // KeyBlock we looked at last + KeyRecord keyRecord; // KeyRecord we looked at last + }; + + // Find a key. If the key doesn't exist, enough information + // is left behind in the result to perform an insertion. + // + // Returns true if the key was found. + // + bool find (FindResult* findResult, void const* key, SharedState::WriteAccess& state) + { + // Not okay to call this with an empty key file! 
+ bassert (state->hasKeys ()); + + // This performs a standard binary search + + findResult->keyIndex = 1; + + do + { + readKeyRecord (&findResult->keyRecord, findResult->keyIndex, state); + + findResult->compare = memcmp (key, findResult->keyRecord.key, m_keyBytes); + + if (findResult->compare < 0) + { + if (findResult->keyRecord.leftIndex != 0) + { + // Go left + findResult->keyIndex = findResult->keyRecord.leftIndex; + } + else + { + // Insert position is to the left + break; + } + } + else if (findResult->compare > 0) + { + if (findResult->keyRecord.rightIndex != 0) + { + // Go right + findResult->keyIndex = findResult->keyRecord.rightIndex; + } + else + { + // Insert position is to the right + break; + } + } + } + while (findResult->compare != 0); + + return findResult->compare == 0; + } + + //-------------------------------------------------------------------------- + + bool get (void const* key, GetCallback* callback) + { + FindResult findResult (m_keyStorage.getData ()); + + SharedState::WriteAccess state (m_state); + + bool found = false; + + if (state->hasKeys ()) + { + found = find (&findResult, key, state); + + if (found) + { + void* const destStorage = callback->getStorageForValue (findResult.keyRecord.valSize); + + Result result = state->valFile.setPosition (findResult.keyRecord.valFileOffset); + + if (result.wasOk ()) + { + size_t bytesRead; + + result = state->valFile.read (destStorage, findResult.keyRecord.valSize, &bytesRead); + + if (result.wasOk ()) + { + if (bytesRead != findResult.keyRecord.valSize) + { + result = Result::fail ("KeyvaDB: bytesRead != valSize"); + } + } + } + + if (! result.wasOk ()) + { + String s; + s << "KeyvaDB: get in " << state->valFile.getFile ().getFileName (); + Throw (std::runtime_error (s.toStdString ())); + } + } + } + + return found; + } + + //-------------------------------------------------------------------------- + + // Write a key value pair. Does nothing if the key exists. 
+ void put (void const* key, void const* value, int valueBytes) + { + bassert (valueBytes > 0); + + SharedState::WriteAccess state (m_state); + + if (state->hasKeys ()) + { + // Search for the key + + FindResult findResult (m_keyStorage.getData ()); + + bool const found = find (&findResult, key, state); + + if (! found ) + { + bassert (findResult.compare != 0); + + // Binary tree insertion. + // Link the last key record to the new key + { + if (findResult.compare < 0) + { + findResult.keyRecord.leftIndex = state->newKeyIndex; + } + else + { + findResult.keyRecord.rightIndex = state->newKeyIndex; + } + + writeKeyRecord (findResult.keyRecord, findResult.keyIndex, state, false); + } + + // Write the new key + { + findResult.keyRecord.valFileOffset = state->valFileSize; + findResult.keyRecord.valSize = valueBytes; + findResult.keyRecord.leftIndex = 0; + findResult.keyRecord.rightIndex = 0; + + memcpy (findResult.keyRecord.key, key, m_keyBytes); + + writeKeyRecord (findResult.keyRecord, state->newKeyIndex, state, true); + } + + // Key file has grown by one. + ++state->newKeyIndex; + + // Write the value + writeValue (value, valueBytes, state); + } + else + { + // Key already exists, do nothing. + // We could check to make sure the payloads are the same. + } + } + else + { + // + // Write first key + // + + KeyRecord keyRecord (m_keyStorage.getData ()); + + keyRecord.valFileOffset = state->valFileSize; + keyRecord.valSize = valueBytes; + keyRecord.leftIndex = 0; + keyRecord.rightIndex = 0; + + memcpy (keyRecord.key, key, m_keyBytes); + + writeKeyRecord (keyRecord, state->newKeyIndex, state, true); + + // Key file has grown by one. 
+ ++state->newKeyIndex; + + // + // Write value + // + + bassert (state->valFileSize == 0); + + writeValue (value, valueBytes, state); + } + } + + //-------------------------------------------------------------------------- + + void flush () + { + SharedState::WriteAccess state (m_state); + + flushInternal (state); + } + + void flushInternal (SharedState::WriteAccess& state) + { + state->keyFile.flush (); + state->valFile.flush (); + } +}; + +KeyvaDB* KeyvaDB::New (int keyBytes, int keyBlockDepth, File keyPath, File valPath) +{ + return new KeyvaDBImp (keyBytes, keyBlockDepth, keyPath, valPath); +} + +//------------------------------------------------------------------------------ + +class KeyvaDBTests : public UnitTest +{ +public: + enum + { + maxPayloadBytes = 8 * 1024 + }; + + KeyvaDBTests () : UnitTest ("KeyvaDB", "ripple") + { + } + + // Retrieval callback stores the value in a Payload object for comparison + struct PayloadGetCallback : KeyvaDB::GetCallback + { + UnitTestUtilities::Payload payload; + + PayloadGetCallback () : payload (maxPayloadBytes) + { + } + + void* getStorageForValue (int valueBytes) + { + bassert (valueBytes <= maxPayloadBytes); + + payload.bytes = valueBytes; + + return payload.data.getData (); + } + }; + + KeyvaDB* createDB (unsigned int keyBytes, File const& path) + { + File const keyPath = path.withFileExtension (".key"); + File const valPath = path.withFileExtension (".val"); + + return KeyvaDB::New (keyBytes, 1, keyPath, valPath); + } + + void deleteDBFiles (File const& path) + { + File const keyPath = path.withFileExtension (".key"); + File const valPath = path.withFileExtension (".val"); + + keyPath.deleteFile (); + valPath.deleteFile (); + } + + template + void testKeySize (unsigned int const maxItems) + { + using namespace UnitTestUtilities; + + typedef UnsignedInteger KeyType; + + int64 const seedValue = 50; + + String s; + + s << "keyBytes=" << String (uint64(KeyBytes)) << ", maxItems=" << String (maxItems); + beginTest (s); + 
+ // Set up the key and value files + File const path (File::createTempFile ("")); + + { + // open the db + ScopedPointer db (createDB (KeyBytes, path)); + + Payload payload (maxPayloadBytes); + Payload check (maxPayloadBytes); + + { + // Create an array of ascending integers. + HeapBlock items (maxItems); + for (unsigned int i = 0; i < maxItems; ++i) + items [i] = i; + + // Now shuffle it deterministically. + repeatableShuffle (maxItems, items, seedValue); + + // Write all the keys of integers. + for (unsigned int i = 0; i < maxItems; ++i) + { + unsigned int keyIndex = items [i]; + + KeyType const key = KeyType::createFromInteger (keyIndex); + + payload.repeatableRandomFill (1, maxPayloadBytes, keyIndex + seedValue); + + db->put (key.cbegin (), payload.data.getData (), payload.bytes); + + { + // VFALCO TODO Check what we just wrote? + //db->get (key.cbegin (), check.data.getData (), payload.bytes); + } + } + } + + { + // Go through all of our keys and try to retrieve them. + // since this is done in ascending order, we should get + // random seeks at this point. 
+ // + PayloadGetCallback cb; + for (unsigned int keyIndex = 0; keyIndex < maxItems; ++keyIndex) + { + KeyType const v = KeyType::createFromInteger (keyIndex); + + bool const found = db->get (v.cbegin (), &cb); + + expect (found, "Should be found"); + + if (found) + { + payload.repeatableRandomFill (1, maxPayloadBytes, keyIndex + seedValue); + + expect (payload == cb.payload, "Should be equal"); + } + } + } + } + + { + // Re-open the database and confirm the data + ScopedPointer db (createDB (KeyBytes, path)); + + Payload payload (maxPayloadBytes); + Payload check (maxPayloadBytes); + + PayloadGetCallback cb; + for (unsigned int keyIndex = 0; keyIndex < maxItems; ++keyIndex) + { + KeyType const v = KeyType::createFromInteger (keyIndex); + + bool const found = db->get (v.cbegin (), &cb); + + expect (found, "Should be found"); + + if (found) + { + payload.repeatableRandomFill (1, maxPayloadBytes, keyIndex + seedValue); + + expect (payload == cb.payload, "Should be equal"); + } + } + } + + deleteDBFiles (path); + } + + void runTest () + { + testKeySize <4> (500); + testKeySize <32> (4000); + } +}; + +static KeyvaDBTests keyvaDBTests; diff --git a/Subtrees/beast/modules/beast_db/keyvalue/beast_KeyvaDB.h b/Subtrees/beast/modules/beast_db/keyvalue/beast_KeyvaDB.h new file mode 100644 index 0000000000..20e4185f49 --- /dev/null +++ b/Subtrees/beast/modules/beast_db/keyvalue/beast_KeyvaDB.h @@ -0,0 +1,55 @@ +//------------------------------------------------------------------------------ +/* + This file is part of Beast: https://github.com/vinniefalco/Beast + Copyright 2013, Vinnie Falco + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef BEAST_KEYVADB_H_INCLUDED +#define BEAST_KEYVADB_H_INCLUDED + +/** Specialized Key/value database + + Once written, a value can never be modified. +*/ +class KeyvaDB : LeakChecked +{ +public: + class GetCallback + { + public: + virtual void* getStorageForValue (int valueBytes) = 0; + }; + + static KeyvaDB* New (int keyBytes, + int keyBlockDepth, + File keyPath, + File valPath); + + virtual ~KeyvaDB () { } + + // VFALCO TODO Make the return value a Result so we can + // detect corruption and errors! + // + virtual bool get (void const* key, GetCallback* callback) = 0; + + // VFALCO TODO Use Result for return value + // + virtual void put (void const* key, void const* value, int valueBytes) = 0; + + virtual void flush () = 0; +}; + +#endif diff --git a/libraries/liblmdb/.gitignore b/Subtrees/mdb/libraries/liblmdb/.gitignore similarity index 100% rename from libraries/liblmdb/.gitignore rename to Subtrees/mdb/libraries/liblmdb/.gitignore diff --git a/libraries/liblmdb/COPYRIGHT b/Subtrees/mdb/libraries/liblmdb/COPYRIGHT similarity index 100% rename from libraries/liblmdb/COPYRIGHT rename to Subtrees/mdb/libraries/liblmdb/COPYRIGHT diff --git a/libraries/liblmdb/Doxyfile b/Subtrees/mdb/libraries/liblmdb/Doxyfile similarity index 100% rename from libraries/liblmdb/Doxyfile rename to Subtrees/mdb/libraries/liblmdb/Doxyfile diff --git a/libraries/liblmdb/LICENSE b/Subtrees/mdb/libraries/liblmdb/LICENSE similarity index 100% rename from libraries/liblmdb/LICENSE rename to Subtrees/mdb/libraries/liblmdb/LICENSE diff --git a/libraries/liblmdb/Makefile 
b/Subtrees/mdb/libraries/liblmdb/Makefile similarity index 87% rename from libraries/liblmdb/Makefile rename to Subtrees/mdb/libraries/liblmdb/Makefile index 25c52ada8e..8255d8b438 100644 --- a/libraries/liblmdb/Makefile +++ b/Subtrees/mdb/libraries/liblmdb/Makefile @@ -3,7 +3,9 @@ ######################################################################## # Configuration. The compiler options must enable threaded compilation. # -# Preprocessor macros (for CPPFLAGS) of interest: +# Preprocessor macros (for CPPFLAGS) of interest... +# Note that the defaults should already be correct for most +# platforms; you should not need to change any of these: # # To compile successfully if the default does not: # - MDB_USE_POSIX_SEM (enabled by default on BSD, Apple) @@ -11,7 +13,7 @@ # semaphores and shared mutexes have different behaviors and # different problems, see the Caveats section in lmdb.h. # -# For best performence or to compile successfully: +# For best performance or to compile successfully: # - MDB_DSYNC = "O_DSYNC" (default) or "O_SYNC" (less efficient) # If O_DSYNC is undefined but exists in /usr/include, # preferably set some compiler flag to get the definition. @@ -25,14 +27,13 @@ # Data format: # - MDB_MAXKEYSIZE # Controls data packing and limits, see mdb.c. -# -# Debugging: -# - MDB_DEBUG, MDB_PARANOID. +# You might need to change this if the default size is too small. 
# CC = gcc W = -W -Wall -Wno-unused-parameter -Wbad-function-cast +THREADS = -pthread OPT = -O2 -g -CFLAGS = -pthread $(OPT) $(W) $(XCFLAGS) +CFLAGS = $(THREADS) $(OPT) $(W) $(XCFLAGS) LDLIBS = SOLIBS = prefix = /usr/local diff --git a/libraries/liblmdb/lmdb.h b/Subtrees/mdb/libraries/liblmdb/lmdb.h similarity index 97% rename from libraries/liblmdb/lmdb.h rename to Subtrees/mdb/libraries/liblmdb/lmdb.h index 9f00a04202..b3cd5ef79e 100644 --- a/libraries/liblmdb/lmdb.h +++ b/Subtrees/mdb/libraries/liblmdb/lmdb.h @@ -166,7 +166,7 @@ typedef int mdb_filehandle_t; /** Library minor version */ #define MDB_VERSION_MINOR 9 /** Library patch version */ -#define MDB_VERSION_PATCH 6 +#define MDB_VERSION_PATCH 7 /** Combine args a,b,c into a single integer for easy version comparisons */ #define MDB_VERINT(a,b,c) (((a) << 24) | ((b) << 16) | (c)) @@ -889,6 +889,15 @@ int mdb_dbi_open(MDB_txn *txn, const char *name, unsigned int flags, MDB_dbi *d */ int mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *stat); + /** @brief Retrieve the DB flags for a database handle. + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[out] flags Address where the flags will be returned. + * @return A non-zero error value on failure and 0 on success. + */ +int mdb_dbi_flags(MDB_env *env, MDB_dbi dbi, unsigned int *flags); + /** @brief Close a database handle. * * This call is not mutex protected. Handles should only be closed by @@ -1289,6 +1298,31 @@ int mdb_cmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b); * @return < 0 if a < b, 0 if a == b, > 0 if a > b */ int mdb_dcmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b); + + /** @brief A callback function used to print a message from the library. + * + * @param[in] msg The string to be printed. + * @param[in] ctx An arbitrary context pointer for the callback. + * @return < 0 on failure, 0 on success. 
+ */ +typedef int (MDB_msg_func)(const char *msg, void *ctx); + + /** @brief Dump the entries in the reader lock table. + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[in] func A #MDB_msg_func function + * @param[in] ctx Anything the message function needs + * @return < 0 on failure, 0 on success. + */ +int mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx); + + /** @brief Check for stale entries in the reader lock table. + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[out] dead Number of stale slots that were cleared + * @return 0 on success, non-zero on failure. + */ +int mdb_reader_check(MDB_env *env, int *dead); /** @} */ #ifdef __cplusplus diff --git a/libraries/liblmdb/mdb.c b/Subtrees/mdb/libraries/liblmdb/mdb.c similarity index 91% rename from libraries/liblmdb/mdb.c rename to Subtrees/mdb/libraries/liblmdb/mdb.c index 620e5b51ff..4d686007ba 100644 --- a/libraries/liblmdb/mdb.c +++ b/Subtrees/mdb/libraries/liblmdb/mdb.c @@ -344,8 +344,10 @@ static txnid_t mdb_debug_start; */ #define MDB_MAGIC 0xBEEFC0DE - /** The version number for a database's file format. */ -#define MDB_VERSION 1 + /** The version number for a database's datafile format. */ +#define MDB_DATA_VERSION 1 + /** The version number for a database's lockfile format. */ +#define MDB_LOCK_VERSION 1 /** @brief The maximum size of a key in the database. * @@ -513,7 +515,7 @@ typedef struct MDB_txbody { /** Stamp identifying this as an MDB file. It must be set * to #MDB_MAGIC. */ uint32_t mtb_magic; - /** Version number of this lock file. Must be set to #MDB_VERSION. */ + /** Version number of this lock file. Must be set to #MDB_LOCK_VERSION. 
*/ uint32_t mtb_version; #if defined(_WIN32) || defined(MDB_USE_POSIX_SEM) char mtb_rmname[MNAME_LEN]; @@ -585,6 +587,7 @@ typedef struct MDB_page { #define P_DIRTY 0x10 /**< dirty page */ #define P_LEAF2 0x20 /**< for #MDB_DUPFIXED records */ #define P_SUBP 0x40 /**< for #MDB_DUPSORT sub-pages */ +#define P_KEEP 0x8000 /**< leave this page alone during spill */ /** @} */ uint16_t mp_flags; /**< @ref mdb_page */ #define mp_lower mp_pb.pb.pb_lower @@ -769,7 +772,7 @@ typedef struct MDB_meta { /** Stamp identifying this as an MDB file. It must be set * to #MDB_MAGIC. */ uint32_t mm_magic; - /** Version number of this lock file. Must be set to #MDB_VERSION. */ + /** Version number of this lock file. Must be set to #MDB_DATA_VERSION. */ uint32_t mm_version; void *mm_address; /**< address for fixed mapping */ size_t mm_mapsize; /**< size of mmap region */ @@ -824,6 +827,10 @@ struct MDB_txn { /** The list of pages that became unused during this transaction. */ MDB_IDL mt_free_pgs; + /** The list of dirty pages we temporarily wrote to disk + * because the dirty list was full. 
+ */ + MDB_IDL mt_spill_pgs; union { MDB_ID2L dirty_list; /**< for write txns: modified pages */ MDB_reader *reader; /**< this thread's reader table slot or NULL */ @@ -857,6 +864,7 @@ struct MDB_txn { #define MDB_TXN_RDONLY 0x01 /**< read-only transaction */ #define MDB_TXN_ERROR 0x02 /**< an error has occurred */ #define MDB_TXN_DIRTY 0x04 /**< must write, even if dirty list is empty */ +#define MDB_TXN_SPILLS 0x08 /**< txn or a parent has spilled pages */ /** @} */ unsigned int mt_flags; /**< @ref mdb_txn */ /** dirty_list maxsize - # of allocated pages allowed, including in parent txns */ @@ -944,6 +952,8 @@ struct MDB_env { #define MDB_ENV_ACTIVE 0x20000000U /** me_txkey is set */ #define MDB_ENV_TXKEY 0x10000000U + /** Have liveness lock in reader table */ +#define MDB_LIVE_READER 0x08000000U uint32_t me_flags; /**< @ref mdb_env */ unsigned int me_psize; /**< size of a page, from #GET_PAGESIZE */ unsigned int me_maxreaders; /**< size of the reader table */ @@ -975,6 +985,7 @@ struct MDB_env { /** Max size of a node on a page */ unsigned int me_nodemax; #ifdef _WIN32 + int me_pidquery; /**< Used in OpenProcess */ HANDLE me_rmutex; /* Windows mutexes don't reside in shared mem */ HANDLE me_wmutex; #elif defined(MDB_USE_POSIX_SEM) @@ -1306,7 +1317,7 @@ mdb_dpage_free(MDB_env *env, MDB_page *dp) } } -/* Return all dirty pages to dpage list */ +/** Return all dirty pages to dpage list */ static void mdb_dlist_free(MDB_txn *txn) { @@ -1320,6 +1331,183 @@ mdb_dlist_free(MDB_txn *txn) dl[0].mid = 0; } +/* Set or clear P_KEEP in non-overflow, non-sub pages in known cursors. + * When clearing, only consider backup cursors (from parent txns) since + * other P_KEEP flags have already been cleared. + * @param[in] mc A cursor handle for the current operation. + * @param[in] pflags Flags of the pages to update: + * P_DIRTY to set P_KEEP, P_DIRTY|P_KEEP to clear it. 
+ */ +static void +mdb_cursorpages_mark(MDB_cursor *mc, unsigned pflags) +{ + MDB_txn *txn = mc->mc_txn; + MDB_cursor *m2, *m3; + MDB_xcursor *mx; + unsigned i, j; + + if (mc->mc_flags & C_UNTRACK) + mc = NULL; /* will find mc in mt_cursors */ + for (i = txn->mt_numdbs;; mc = txn->mt_cursors[--i]) { + for (; mc; mc=mc->mc_next) { + m2 = pflags == P_DIRTY ? mc : mc->mc_backup; + for (; m2; m2 = m2->mc_backup) { + for (m3=m2; m3->mc_flags & C_INITIALIZED; m3=&mx->mx_cursor) { + for (j=0; jmc_snum; j++) + if ((m3->mc_pg[j]->mp_flags & (P_SUBP|P_DIRTY|P_KEEP)) + == pflags) + m3->mc_pg[j]->mp_flags ^= P_KEEP; + if (!(m3->mc_db->md_flags & MDB_DUPSORT)) + break; + /* Cursor backups have mx malloced at the end of m2 */ + mx = (m3 == mc ? m3->mc_xcursor : (MDB_xcursor *)(m3+1)); + } + } + } + if (i == 0) + break; + } +} + +static int mdb_page_flush(MDB_txn *txn); + +/** Spill pages from the dirty list back to disk. + * This is intended to prevent running into #MDB_TXN_FULL situations, + * but note that they may still occur in a few cases: + * 1) pages in #MDB_DUPSORT sub-DBs are never spilled, so if there + * are too many of these dirtied in one txn, the txn may still get + * too full. + * 2) child txns may run out of space if their parents dirtied a + * lot of pages and never spilled them. TODO: we probably should do + * a preemptive spill during #mdb_txn_begin() of a child txn, if + * the parent's dirty_room is below a given threshold. + * 3) our estimate of the txn size could be too small. At the + * moment this seems unlikely. + * + * Otherwise, if not using nested txns, it is expected that apps will + * not run into #MDB_TXN_FULL any more. The pages are flushed to disk + * the same way as for a txn commit, e.g. their P_DIRTY flag is cleared. + * If the txn never references them again, they can be left alone. + * If the txn only reads them, they can be used without any fuss. 
+ * If the txn writes them again, they can be dirtied immediately without + * going thru all of the work of #mdb_page_touch(). Such references are + * handled by #mdb_page_unspill(). + * + * Also note, we never spill DB root pages, nor pages of active cursors, + * because we'll need these back again soon anyway. And in nested txns, + * we can't spill a page in a child txn if it was already spilled in a + * parent txn. That would alter the parent txns' data even though + * the child hasn't committed yet, and we'd have no way to undo it if + * the child aborted. + * + * @param[in] m0 cursor A cursor handle identifying the transaction and + * database for which we are checking space. + * @param[in] key For a put operation, the key being stored. + * @param[in] data For a put operation, the data being stored. + * @return 0 on success, non-zero on failure. + */ +static int +mdb_page_spill(MDB_cursor *m0, MDB_val *key, MDB_val *data) +{ + MDB_txn *txn = m0->mc_txn; + MDB_page *dp; + MDB_ID2L dl = txn->mt_u.dirty_list; + unsigned int i, j; + int rc; + + if (m0->mc_flags & C_SUB) + return MDB_SUCCESS; + + /* Estimate how much space this op will take */ + i = m0->mc_db->md_depth; + /* Named DBs also dirty the main DB */ + if (m0->mc_dbi > MAIN_DBI) + i += txn->mt_dbs[MAIN_DBI].md_depth; + /* For puts, roughly factor in the key+data size */ + if (key) + i += (LEAFSIZE(key, data) + txn->mt_env->me_psize) / txn->mt_env->me_psize; + i += i; /* double it for good measure */ + + if (txn->mt_dirty_room > i) + return MDB_SUCCESS; + + if (!txn->mt_spill_pgs) { + txn->mt_spill_pgs = mdb_midl_alloc(MDB_IDL_UM_MAX); + if (!txn->mt_spill_pgs) + return ENOMEM; + } + + /* Mark all the dirty root pages we want to preserve */ + for (i=0; imt_numdbs; i++) { + if (txn->mt_dbflags[i] & DB_DIRTY) { + j = mdb_mid2l_search(dl, txn->mt_dbs[i].md_root); + if (j <= dl[0].mid) { + dp = dl[j].mptr; + dp->mp_flags |= P_KEEP; + } + } + } + + /* Preserve pages used by cursors */ + mdb_cursorpages_mark(m0, 
P_DIRTY); + + /* Save the page IDs of all the pages we're flushing */ + for (i=1; i<=dl[0].mid; i++) { + dp = dl[i].mptr; + if (dp->mp_flags & P_KEEP) + continue; + /* Can't spill twice, make sure it's not already in a parent's + * spill list. + */ + if (txn->mt_parent) { + MDB_txn *tx2; + for (tx2 = txn->mt_parent; tx2; tx2 = tx2->mt_parent) { + if (tx2->mt_spill_pgs) { + j = mdb_midl_search(tx2->mt_spill_pgs, dl[i].mid); + if (j <= tx2->mt_spill_pgs[0] && tx2->mt_spill_pgs[j] == dl[i].mid) { + dp->mp_flags |= P_KEEP; + break; + } + } + } + if (tx2) + continue; + } + if ((rc = mdb_midl_append(&txn->mt_spill_pgs, dl[i].mid))) + return rc; + } + mdb_midl_sort(txn->mt_spill_pgs); + + rc = mdb_page_flush(txn); + + mdb_cursorpages_mark(m0, P_DIRTY|P_KEEP); + + if (rc == 0) { + if (txn->mt_parent) { + MDB_txn *tx2; + pgno_t pgno = dl[i].mid; + txn->mt_dirty_room = txn->mt_parent->mt_dirty_room - dl[0].mid; + /* dirty pages that are dirty in an ancestor don't + * count against this txn's dirty_room. + */ + for (i=1; i<=dl[0].mid; i++) { + for (tx2 = txn->mt_parent; tx2; tx2 = tx2->mt_parent) { + j = mdb_mid2l_search(tx2->mt_u.dirty_list, pgno); + if (j <= tx2->mt_u.dirty_list[0].mid && + tx2->mt_u.dirty_list[j].mid == pgno) { + txn->mt_dirty_room++; + break; + } + } + } + } else { + txn->mt_dirty_room = MDB_IDL_UM_MAX - dl[0].mid; + } + txn->mt_flags |= MDB_TXN_SPILLS; + } + return rc; +} + /** Find oldest txnid still referenced. Expects txn->mt_txnid > 0. 
*/ static txnid_t mdb_find_oldest(MDB_txn *txn) @@ -1337,6 +1525,24 @@ mdb_find_oldest(MDB_txn *txn) return oldest; } +/** Add a page to the txn's dirty list */ +static void +mdb_page_dirty(MDB_txn *txn, MDB_page *mp) +{ + MDB_ID2 mid; + int (*insert)(MDB_ID2L, MDB_ID2 *); + + if (txn->mt_env->me_flags & MDB_WRITEMAP) { + insert = mdb_mid2l_append; + } else { + insert = mdb_mid2l_insert; + } + mid.mid = mp->mp_pgno; + mid.mptr = mp; + insert(txn->mt_u.dirty_list, &mid); + txn->mt_dirty_room--; +} + /** Allocate pages for writing. * If there are free pages available from older transactions, they * will be re-used first. Otherwise a new page will be allocated. @@ -1367,11 +1573,9 @@ mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp) pgno_t pgno, *mop = env->me_pghead; unsigned i, j, k, mop_len = mop ? mop[0] : 0; MDB_page *np; - MDB_ID2 mid; txnid_t oldest = 0, last; MDB_cursor_op op; MDB_cursor m2; - int (*insert)(MDB_ID2L, MDB_ID2 *); *mp = NULL; @@ -1474,11 +1678,9 @@ mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp) search_done: if (env->me_flags & MDB_WRITEMAP) { np = (MDB_page *)(env->me_map + env->me_psize * pgno); - insert = mdb_mid2l_append; } else { if (!(np = mdb_page_malloc(txn, num))) return ENOMEM; - insert = mdb_mid2l_insert; } if (i) { mop[0] = mop_len -= num; @@ -1488,10 +1690,8 @@ search_done: } else { txn->mt_next_pgno = pgno + num; } - mid.mid = np->mp_pgno = pgno; - mid.mptr = np; - insert(txn->mt_u.dirty_list, &mid); - txn->mt_dirty_room--; + np->mp_pgno = pgno; + mdb_page_dirty(txn, np); *mp = np; return MDB_SUCCESS; @@ -1521,6 +1721,77 @@ mdb_page_copy(MDB_page *dst, MDB_page *src, unsigned int psize) } } +/** Pull a page off the txn's spill list, if present. + * If a page being referenced was spilled to disk in this txn, bring + * it back and make it dirty/writable again. + * @param[in] tx0 the transaction handle. + * @param[in] mp the page being referenced. + * @param[out] ret the writable page, if any. 
ret is unchanged if + * mp wasn't spilled. + */ +static int +mdb_page_unspill(MDB_txn *tx0, MDB_page *mp, MDB_page **ret) +{ + MDB_env *env = tx0->mt_env; + MDB_txn *txn; + unsigned x; + pgno_t pgno = mp->mp_pgno; + + for (txn = tx0; txn; txn=txn->mt_parent) { + if (!txn->mt_spill_pgs) + continue; + x = mdb_midl_search(txn->mt_spill_pgs, pgno); + if (x <= txn->mt_spill_pgs[0] && txn->mt_spill_pgs[x] == pgno) { + MDB_page *np; + int num; + if (IS_OVERFLOW(mp)) + num = mp->mp_pages; + else + num = 1; + if (env->me_flags & MDB_WRITEMAP) { + np = mp; + } else { + np = mdb_page_malloc(txn, num); + if (!np) + return ENOMEM; + if (num > 1) + memcpy(np, mp, num * env->me_psize); + else + mdb_page_copy(np, mp, env->me_psize); + } + if (txn == tx0) { + /* If in current txn, this page is no longer spilled */ + for (; x < txn->mt_spill_pgs[0]; x++) + txn->mt_spill_pgs[x] = txn->mt_spill_pgs[x+1]; + txn->mt_spill_pgs[0]--; + } /* otherwise, if belonging to a parent txn, the + * page remains spilled until child commits + */ + + if (txn->mt_parent) { + MDB_txn *tx2; + /* If this page is also in a parent's dirty list, then + * it's already accounted in dirty_room, and we need to + * cancel out the decrement that mdb_page_dirty does. + */ + for (tx2 = txn->mt_parent; tx2; tx2 = tx2->mt_parent) { + x = mdb_mid2l_search(tx2->mt_u.dirty_list, pgno); + if (x <= tx2->mt_u.dirty_list[0].mid && + tx2->mt_u.dirty_list[x].mid == pgno) { + txn->mt_dirty_room++; + break; + } + } + } + mdb_page_dirty(tx0, np); + np->mp_flags |= P_DIRTY; + *ret = np; + break; + } + } + return MDB_SUCCESS; +} + /** Touch a page: make it dirty and re-insert into tree with updated pgno. * @param[in] mc cursor pointing to the page to be touched * @return 0 on success, non-zero on failure. 
@@ -1536,6 +1807,14 @@ mdb_page_touch(MDB_cursor *mc) int rc; if (!F_ISSET(mp->mp_flags, P_DIRTY)) { + if (txn->mt_flags & MDB_TXN_SPILLS) { + np = NULL; + rc = mdb_page_unspill(txn, mp, &np); + if (rc) + return rc; + if (np) + goto done; + } if ((rc = mdb_midl_need(&txn->mt_free_pgs, 1)) || (rc = mdb_page_alloc(mc, 1, &np))) return rc; @@ -1583,6 +1862,7 @@ mdb_page_touch(MDB_cursor *mc) np->mp_pgno = pgno; np->mp_flags |= P_DIRTY; +done: /* Adjust cursors pointing to mp */ mc->mc_pg[mc->mc_top] = np; dbi = mc->mc_dbi; @@ -1717,6 +1997,56 @@ mdb_cursors_close(MDB_txn *txn, unsigned merge) static void mdb_txn_reset0(MDB_txn *txn, const char *act); +#ifdef _WIN32 +enum Pidlock_op { + Pidset, Pidcheck +}; +#else +enum Pidlock_op { + Pidset = F_SETLK, Pidcheck = F_GETLK +}; +#endif + +/** Set or check a pid lock. Set returns 0 on success. + * Check returns 0 if lock exists (meaning the process is alive). + * + * On Windows Pidset is a no-op, we merely check for the existence + * of the process with the given pid. On POSIX we use a single byte + * lock on the lockfile, set at an offset equal to the pid. + */ +static int +mdb_reader_pid(MDB_env *env, enum Pidlock_op op, pid_t pid) +{ +#ifdef _WIN32 + HANDLE h; + int ver, query; + switch(op) { + case Pidset: + break; + case Pidcheck: + h = OpenProcess(env->me_pidquery, FALSE, pid); + if (!h) + return GetLastError(); + CloseHandle(h); + break; + } + return 0; +#else + int rc; + struct flock lock_info; + memset((void *)&lock_info, 0, sizeof(lock_info)); + lock_info.l_type = F_WRLCK; + lock_info.l_whence = SEEK_SET; + lock_info.l_start = pid; + lock_info.l_len = 1; + while ((rc = fcntl(env->me_lfd, op, &lock_info)) && + (rc = ErrCode()) == EINTR) ; + if (op == F_GETLK && rc == 0 && lock_info.l_type == F_UNLCK) + rc = -1; + return rc; +#endif +} + /** Common code for #mdb_txn_begin() and #mdb_txn_renew(). * @param[in] txn the transaction handle to initialize * @return 0 on success, non-zero on failure. 
@@ -1748,6 +2078,15 @@ mdb_txn_renew0(MDB_txn *txn) pid_t pid = env->me_pid; pthread_t tid = pthread_self(); + if (!(env->me_flags & MDB_LIVE_READER)) { + rc = mdb_reader_pid(env, Pidset, pid); + if (rc) { + UNLOCK_MUTEX_R(env); + return rc; + } + env->me_flags |= MDB_LIVE_READER; + } + LOCK_MUTEX_R(env); for (i=0; ime_txns->mti_numreaders; i++) if (env->me_txns->mti_readers[i].mr_pid == 0) @@ -1789,6 +2128,7 @@ mdb_txn_renew0(MDB_txn *txn) txn->mt_u.dirty_list[0].mid = 0; txn->mt_free_pgs = env->me_free_pgs; txn->mt_free_pgs[0] = 0; + txn->mt_spill_pgs = NULL; env->me_txn = txn; } @@ -1894,6 +2234,7 @@ mdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags, MDB_txn **ret) txn->mt_toggle = parent->mt_toggle; txn->mt_dirty_room = parent->mt_dirty_room; txn->mt_u.dirty_list[0].mid = 0; + txn->mt_spill_pgs = NULL; txn->mt_next_pgno = parent->mt_next_pgno; parent->mt_child = txn; txn->mt_parent = parent; @@ -1996,6 +2337,7 @@ mdb_txn_reset0(MDB_txn *txn, const char *act) txn->mt_parent->mt_child = NULL; env->me_pgstate = ((MDB_ntxn *)txn)->mnt_pgstate; mdb_midl_free(txn->mt_free_pgs); + mdb_midl_free(txn->mt_spill_pgs); free(txn->mt_u.dirty_list); return; } @@ -2159,25 +2501,32 @@ mdb_freelist_save(MDB_txn *txn) total_room += head_room; } - /* Fill in the reserved, touched me_pghead records. Avoid write ops - * so they cannot rearrange anything, just read the destinations. 
- */ + /* Fill in the reserved, touched me_pghead records */ rc = MDB_SUCCESS; if (mop_len) { MDB_val key, data; - mop += mop_len + 1; + mop += mop_len; rc = mdb_cursor_first(&mc, &key, &data); for (; !rc; rc = mdb_cursor_next(&mc, &key, &data, MDB_NEXT)) { - MDB_IDL dest = data.mv_data; + unsigned flags = MDB_CURRENT; + txnid_t id = *(txnid_t *)key.mv_data; ssize_t len = (ssize_t)(data.mv_size / sizeof(MDB_ID)) - 1; + MDB_ID save; - assert(len >= 0 && *(txnid_t*)key.mv_data <= env->me_pglast); - if (len > mop_len) + assert(len >= 0 && id <= env->me_pglast); + key.mv_data = &id; + if (len > mop_len) { len = mop_len; - *dest++ = len; - memcpy(dest, mop -= len, len * sizeof(MDB_ID)); - if (! (mop_len -= len)) + data.mv_size = (len + 1) * sizeof(MDB_ID); + flags = 0; + } + data.mv_data = mop -= len; + save = mop[0]; + mop[0] = len; + rc = mdb_cursor_put(&mc, &key, &data, flags); + mop[0] = save; + if (rc || !(mop_len -= len)) break; } } @@ -2191,7 +2540,7 @@ mdb_page_flush(MDB_txn *txn) { MDB_env *env = txn->mt_env; MDB_ID2L dl = txn->mt_u.dirty_list; - unsigned psize = env->me_psize; + unsigned psize = env->me_psize, j; int i, pagecount = dl[0].mid, rc; size_t size = 0, pos = 0; pgno_t pgno = 0; @@ -2205,13 +2554,20 @@ mdb_page_flush(MDB_txn *txn) int n = 0; #endif + j = 0; if (env->me_flags & MDB_WRITEMAP) { /* Clear dirty flags */ for (i = pagecount; i; i--) { dp = dl[i].mptr; + /* Don't flush this page yet */ + if (dp->mp_flags & P_KEEP) { + dp->mp_flags ^= P_KEEP; + dl[++j] = dl[i]; + continue; + } dp->mp_flags &= ~P_DIRTY; } - dl[0].mid = 0; + dl[0].mid = j; return MDB_SUCCESS; } @@ -2219,6 +2575,12 @@ mdb_page_flush(MDB_txn *txn) for (i = 1;; i++) { if (i <= pagecount) { dp = dl[i].mptr; + /* Don't flush this page yet */ + if (dp->mp_flags & P_KEEP) { + dp->mp_flags ^= P_KEEP; + dl[i].mid = 0; + continue; + } pgno = dl[i].mid; /* clear dirty flag */ dp->mp_flags &= ~P_DIRTY; @@ -2290,7 +2652,18 @@ mdb_page_flush(MDB_txn *txn) #endif /* _WIN32 */ } - 
mdb_dlist_free(txn); + j = 0; + for (i=1; i<=pagecount; i++) { + dp = dl[i].mptr; + /* This is a page we skipped above */ + if (!dl[i].mid) { + dl[++j] = dl[i]; + dl[j].mid = dp->mp_pgno; + continue; + } + mdb_dpage_free(env, dp); + } + dl[0].mid = j; return MDB_SUCCESS; } @@ -2348,17 +2721,48 @@ mdb_txn_commit(MDB_txn *txn) /* Update parent's DB table. */ memcpy(parent->mt_dbs, txn->mt_dbs, txn->mt_numdbs * sizeof(MDB_db)); - txn->mt_parent->mt_numdbs = txn->mt_numdbs; - txn->mt_parent->mt_dbflags[0] = txn->mt_dbflags[0]; - txn->mt_parent->mt_dbflags[1] = txn->mt_dbflags[1]; + parent->mt_numdbs = txn->mt_numdbs; + parent->mt_dbflags[0] = txn->mt_dbflags[0]; + parent->mt_dbflags[1] = txn->mt_dbflags[1]; for (i=2; imt_numdbs; i++) { /* preserve parent's DB_NEW status */ - x = txn->mt_parent->mt_dbflags[i] & DB_NEW; - txn->mt_parent->mt_dbflags[i] = txn->mt_dbflags[i] | x; + x = parent->mt_dbflags[i] & DB_NEW; + parent->mt_dbflags[i] = txn->mt_dbflags[i] | x; } - dst = txn->mt_parent->mt_u.dirty_list; + dst = parent->mt_u.dirty_list; src = txn->mt_u.dirty_list; + /* Remove anything in our dirty list from parent's spill list */ + if (parent->mt_spill_pgs) { + x = parent->mt_spill_pgs[0]; + len = x; + /* zero out our dirty pages in parent spill list */ + for (i=1; i<=src[0].mid; i++) { + if (src[i].mid < parent->mt_spill_pgs[x]) + continue; + if (src[i].mid > parent->mt_spill_pgs[x]) { + if (x <= 1) + break; + x--; + continue; + } + parent->mt_spill_pgs[x] = 0; + len--; + } + /* OK, we had a few hits, squash zeros from the spill list */ + if (len < parent->mt_spill_pgs[0]) { + x=1; + for (y=1; y<=parent->mt_spill_pgs[0]; y++) { + if (parent->mt_spill_pgs[y]) { + if (y != x) { + parent->mt_spill_pgs[x] = parent->mt_spill_pgs[y]; + } + x++; + } + } + parent->mt_spill_pgs[0] = len; + } + } /* Find len = length of merging our dirty list with parent's */ x = dst[0].mid; dst[0].mid = 0; /* simplify loops */ @@ -2390,8 +2794,17 @@ mdb_txn_commit(MDB_txn *txn) dst[0].mid = 
len; free(txn->mt_u.dirty_list); parent->mt_dirty_room = txn->mt_dirty_room; + if (txn->mt_spill_pgs) { + if (parent->mt_spill_pgs) { + mdb_midl_append_list(&parent->mt_spill_pgs, txn->mt_spill_pgs); + mdb_midl_free(txn->mt_spill_pgs); + mdb_midl_sort(parent->mt_spill_pgs); + } else { + parent->mt_spill_pgs = txn->mt_spill_pgs; + } + } - txn->mt_parent->mt_child = NULL; + parent->mt_child = NULL; mdb_midl_free(((MDB_ntxn *)txn)->mnt_pgstate.mf_pghead); free(txn); return MDB_SUCCESS; @@ -2487,6 +2900,8 @@ mdb_env_read_header(MDB_env *env, MDB_meta *meta) memset(&ov, 0, sizeof(ov)); ov.Offset = off; rc = ReadFile(env->me_fd,&pbuf,MDB_PAGESIZE,&len,&ov) ? (int)len : -1; + if (rc == -1 && ErrCode() == ERROR_HANDLE_EOF) + rc = 0; #else rc = pread(env->me_fd, &pbuf, MDB_PAGESIZE, off); #endif @@ -2511,9 +2926,9 @@ mdb_env_read_header(MDB_env *env, MDB_meta *meta) return MDB_INVALID; } - if (m->mm_version != MDB_VERSION) { + if (m->mm_version != MDB_DATA_VERSION) { DPRINTF("database is version %u, expected version %u", - m->mm_version, MDB_VERSION); + m->mm_version, MDB_DATA_VERSION); return MDB_VERSION_MISMATCH; } @@ -2540,7 +2955,7 @@ mdb_env_init_meta(MDB_env *env, MDB_meta *meta) GET_PAGESIZE(psize); meta->mm_magic = MDB_MAGIC; - meta->mm_version = MDB_VERSION; + meta->mm_version = MDB_DATA_VERSION; meta->mm_mapsize = env->me_mapsize; meta->mm_psize = psize; meta->mm_last_pg = 1; @@ -2808,6 +3223,14 @@ mdb_env_open2(MDB_env *env) LONG sizelo, sizehi; sizelo = env->me_mapsize & 0xffffffff; sizehi = env->me_mapsize >> 16 >> 16; /* only needed on Win64 */ + + /* See if we should use QueryLimited */ + rc = GetVersion(); + if ((rc & 0xff) > 5) + env->me_pidquery = PROCESS_QUERY_LIMITED_INFORMATION; + else + env->me_pidquery = PROCESS_QUERY_INFORMATION; + /* Windows won't create mappings for zero length files. * Just allocate the maxsize right now. 
*/ @@ -3301,7 +3724,7 @@ mdb_env_setup_locks(MDB_env *env, char *lpath, int mode, int *excl) pthread_mutexattr_destroy(&mattr); #endif /* _WIN32 || MDB_USE_POSIX_SEM */ - env->me_txns->mti_version = MDB_VERSION; + env->me_txns->mti_version = MDB_LOCK_VERSION; env->me_txns->mti_magic = MDB_MAGIC; env->me_txns->mti_txnid = 0; env->me_txns->mti_numreaders = 0; @@ -3312,9 +3735,9 @@ mdb_env_setup_locks(MDB_env *env, char *lpath, int mode, int *excl) rc = MDB_INVALID; goto fail; } - if (env->me_txns->mti_version != MDB_VERSION) { + if (env->me_txns->mti_version != MDB_LOCK_VERSION) { DPRINTF("lock region is version %u, expected version %u", - env->me_txns->mti_version, MDB_VERSION); + env->me_txns->mti_version, MDB_LOCK_VERSION); rc = MDB_VERSION_MISMATCH; goto fail; } @@ -3970,6 +4393,19 @@ mdb_page_get(MDB_txn *txn, pgno_t pgno, MDB_page **ret, int *lvl) level = 1; do { MDB_ID2L dl = tx2->mt_u.dirty_list; + unsigned x; + /* Spilled pages were dirtied in this txn and flushed + * because the dirty list got full. Bring this page + * back in from the map (but don't unspill it here, + * leave that unless page_touch happens again). + */ + if (tx2->mt_spill_pgs) { + x = mdb_midl_search(tx2->mt_spill_pgs, pgno); + if (x <= tx2->mt_spill_pgs[0] && tx2->mt_spill_pgs[x] == pgno) { + p = (MDB_page *)(txn->mt_env->me_map + txn->mt_env->me_psize * pgno); + goto done; + } + } if (dl[0].mid) { unsigned x = mdb_mid2l_search(dl, pgno); if (x <= dl[0].mid && dl[x].mid == pgno) { @@ -4070,6 +4506,8 @@ mdb_page_search_root(MDB_cursor *mc, MDB_val *key, int modify) DPRINTF("found leaf page %zu for key [%s]", mp->mp_pgno, key ? DKEY(key) : NULL); + mc->mc_flags |= C_INITIALIZED; + mc->mc_flags &= ~C_EOF; return MDB_SUCCESS; } @@ -4197,11 +4635,21 @@ mdb_ovpage_free(MDB_cursor *mc, MDB_page *mp) int rc; DPRINTF("free ov page %zu (%d)", pg, ovpages); - /* If the page is dirty we just acquired it, so we should - * give it back to our current free list, if any. 
+ /* If the page is dirty or on the spill list we just acquired it, + * so we should give it back to our current free list, if any. * Not currently supported in nested txns. * Otherwise put it onto the list of pages we freed in this txn. */ + if (!(mp->mp_flags & P_DIRTY) && txn->mt_spill_pgs) { + unsigned x = mdb_midl_search(txn->mt_spill_pgs, pg); + if (x <= txn->mt_spill_pgs[0] && txn->mt_spill_pgs[x] == pg) { + /* This page is no longer spilled */ + for (; x < txn->mt_spill_pgs[0]; x++) + txn->mt_spill_pgs[x] = txn->mt_spill_pgs[x+1]; + txn->mt_spill_pgs[0]--; + goto release; + } + } if ((mp->mp_flags & P_DIRTY) && !txn->mt_parent && env->me_pghead) { unsigned j, x; pgno_t *mop; @@ -4227,6 +4675,7 @@ mdb_ovpage_free(MDB_cursor *mc, MDB_page *mp) } if (!(env->me_flags & MDB_WRITEMAP)) mdb_dpage_free(env, mp); +release: /* Insert in me_pghead */ mop = env->me_pghead; j = mop[0] + ovpages; @@ -4514,6 +4963,9 @@ mdb_cursor_set(MDB_cursor *mc, MDB_val *key, MDB_val *data, assert(key); assert(key->mv_size > 0); + if (mc->mc_xcursor) + mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); + /* See if we're already on the right page */ if (mc->mc_flags & C_INITIALIZED) { MDB_val nodekey; @@ -4686,6 +5138,9 @@ mdb_cursor_first(MDB_cursor *mc, MDB_val *key, MDB_val *data) int rc; MDB_node *leaf; + if (mc->mc_xcursor) + mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); + if (!(mc->mc_flags & C_INITIALIZED) || mc->mc_top) { rc = mdb_page_search(mc, NULL, 0); if (rc != MDB_SUCCESS) @@ -4712,8 +5167,6 @@ mdb_cursor_first(MDB_cursor *mc, MDB_val *key, MDB_val *data) if (rc) return rc; } else { - if (mc->mc_xcursor) - mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); if ((rc = mdb_node_read(mc->mc_txn, leaf, data)) != MDB_SUCCESS) return rc; } @@ -4729,6 +5182,9 @@ mdb_cursor_last(MDB_cursor *mc, MDB_val *key, MDB_val *data) int rc; MDB_node *leaf; + if (mc->mc_xcursor) + mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); + if 
(!(mc->mc_flags & C_EOF)) { if (!(mc->mc_flags & C_INITIALIZED) || mc->mc_top) { @@ -4760,8 +5216,6 @@ mdb_cursor_last(MDB_cursor *mc, MDB_val *key, MDB_val *data) if (rc) return rc; } else { - if (mc->mc_xcursor) - mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); if ((rc = mdb_node_read(mc->mc_txn, leaf, data)) != MDB_SUCCESS) return rc; } @@ -4943,16 +5397,20 @@ mdb_cursor_touch(MDB_cursor *mc) return MDB_SUCCESS; } +/** Do not spill pages to disk if txn is getting full, may fail instead */ +#define MDB_NOSPILL 0x8000 + int mdb_cursor_put(MDB_cursor *mc, MDB_val *key, MDB_val *data, unsigned int flags) { + enum { MDB_NO_ROOT = MDB_LAST_ERRCODE+10 }; /* internal code */ MDB_node *leaf = NULL; MDB_val xdata, *rdata, dkey; MDB_page *fp; MDB_db dummy; int do_sub = 0, insert = 0; - unsigned int mcount = 0, dcount = 0; + unsigned int mcount = 0, dcount = 0, nospill; size_t nsize; int rc, rc2; MDB_pagebuf pbuf; @@ -4970,6 +5428,9 @@ mdb_cursor_put(MDB_cursor *mc, MDB_val *key, MDB_val *data, return EINVAL; } + nospill = flags & MDB_NOSPILL; + flags &= ~MDB_NOSPILL; + if (F_ISSET(mc->mc_txn->mt_flags, MDB_TXN_RDONLY)) return EACCES; @@ -4994,23 +5455,10 @@ mdb_cursor_put(MDB_cursor *mc, MDB_val *key, MDB_val *data, return EINVAL; rc = MDB_SUCCESS; } else if (mc->mc_db->md_root == P_INVALID) { - MDB_page *np; - /* new database, write a root leaf page */ - DPUTS("allocating new root leaf page"); - if ((rc = mdb_page_new(mc, P_LEAF, 1, &np))) { - return rc; - } + /* new database, cursor has nothing to point to */ mc->mc_snum = 0; - mdb_cursor_push(mc, np); - mc->mc_db->md_root = np->mp_pgno; - mc->mc_db->md_depth++; - *mc->mc_dbflag |= DB_DIRTY; - if ((mc->mc_db->md_flags & (MDB_DUPSORT|MDB_DUPFIXED)) - == MDB_DUPFIXED) - np->mp_flags |= P_LEAF2; - mc->mc_flags |= C_INITIALIZED; - rc = MDB_NOTFOUND; - goto top; + mc->mc_flags &= ~C_INITIALIZED; + rc = MDB_NO_ROOT; } else { int exact = 0; MDB_val d2; @@ -5028,7 +5476,7 @@ mdb_cursor_put(MDB_cursor *mc, MDB_val 
*key, MDB_val *data, } } } else { - rc = mdb_cursor_set(mc, key, &d2, MDB_SET, &exact); + rc = mdb_cursor_set(mc, key, &d2, MDB_SET, &exact); } if ((flags & MDB_NOOVERWRITE) && rc == 0) { DPRINTF("duplicate key [%s]", DKEY(key)); @@ -5039,12 +5487,40 @@ mdb_cursor_put(MDB_cursor *mc, MDB_val *key, MDB_val *data, return rc; } - /* Cursor is positioned, now make sure all pages are writable */ - rc2 = mdb_cursor_touch(mc); - if (rc2) - return rc2; + /* Cursor is positioned, check for room in the dirty list */ + if (!nospill) { + if (flags & MDB_MULTIPLE) { + rdata = &xdata; + xdata.mv_size = data->mv_size * dcount; + } else { + rdata = data; + } + if ((rc2 = mdb_page_spill(mc, key, rdata))) + return rc2; + } + + if (rc == MDB_NO_ROOT) { + MDB_page *np; + /* new database, write a root leaf page */ + DPUTS("allocating new root leaf page"); + if ((rc2 = mdb_page_new(mc, P_LEAF, 1, &np))) { + return rc2; + } + mdb_cursor_push(mc, np); + mc->mc_db->md_root = np->mp_pgno; + mc->mc_db->md_depth++; + *mc->mc_dbflag |= DB_DIRTY; + if ((mc->mc_db->md_flags & (MDB_DUPSORT|MDB_DUPFIXED)) + == MDB_DUPFIXED) + np->mp_flags |= P_LEAF2; + mc->mc_flags |= C_INITIALIZED; + } else { + /* make sure all cursor pages are writable */ + rc2 = mdb_cursor_touch(mc); + if (rc2) + return rc2; + } -top: /* The key already exists */ if (rc == MDB_SUCCESS) { /* there's only a key anyway, so this is a no-op */ @@ -5200,8 +5676,18 @@ current: return rc2; ovpages = omp->mp_pages; - /* Is the ov page writable and large enough? */ - if ((omp->mp_flags & P_DIRTY) && ovpages >= dpages) { + /* Is the ov page large enough? */ + if (ovpages >= dpages) { + if (!(omp->mp_flags & P_DIRTY) && + (level || (mc->mc_txn->mt_env->me_flags & MDB_WRITEMAP))) + { + rc = mdb_page_unspill(mc->mc_txn, omp, &omp); + if (rc) + return rc; + level = 0; /* dirty in this txn or clean */ + } + /* Is it dirty? */ + if (omp->mp_flags & P_DIRTY) { /* yes, overwrite it. 
Note in this case we don't * bother to try shrinking the page if the new data * is smaller than the overflow threshold. @@ -5234,10 +5720,10 @@ current: else memcpy(METADATA(omp), data->mv_data, data->mv_size); goto done; - } else { - if ((rc2 = mdb_ovpage_free(mc, omp)) != MDB_SUCCESS) - return rc2; + } } + if ((rc2 = mdb_ovpage_free(mc, omp)) != MDB_SUCCESS) + return rc2; } else if (NODEDSZ(leaf) == data->mv_size) { /* same size, just replace it. Note that we could * also reuse this node if the new data is smaller, @@ -5310,10 +5796,11 @@ put_sub: xdata.mv_data = ""; leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); if (flags & MDB_CURRENT) { - xflags = MDB_CURRENT; + xflags = MDB_CURRENT|MDB_NOSPILL; } else { mdb_xcursor_init1(mc, leaf); - xflags = (flags & MDB_NODUPDATA) ? MDB_NOOVERWRITE : 0; + xflags = (flags & MDB_NODUPDATA) ? + MDB_NOOVERWRITE|MDB_NOSPILL : MDB_NOSPILL; } /* converted, write the original data first */ if (dkey.mv_size) { @@ -5328,6 +5815,7 @@ put_sub: for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2=m2->mc_next) { if (m2 == mc || m2->mc_snum < mc->mc_snum) continue; + if (!(m2->mc_flags & C_INITIALIZED)) continue; if (m2->mc_pg[i] == mp && m2->mc_ki[i] == mc->mc_ki[i]) { mdb_xcursor_init1(m2, leaf); } @@ -5383,6 +5871,10 @@ mdb_cursor_del(MDB_cursor *mc, unsigned int flags) if (!(mc->mc_flags & C_INITIALIZED)) return EINVAL; + if (!(flags & MDB_NOSPILL) && (rc = mdb_page_spill(mc, NULL, NULL))) + return rc; + flags &= ~MDB_NOSPILL; /* TODO: Or change (flags != MDB_NODUPDATA) to ~(flags & MDB_NODUPDATA), not looking at the logic of that code just now */ + rc = mdb_cursor_touch(mc); if (rc) return rc; @@ -5394,7 +5886,7 @@ mdb_cursor_del(MDB_cursor *mc, unsigned int flags) if (!F_ISSET(leaf->mn_flags, F_SUBDATA)) { mc->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(leaf); } - rc = mdb_cursor_del(&mc->mc_xcursor->mx_cursor, 0); + rc = mdb_cursor_del(&mc->mc_xcursor->mx_cursor, MDB_NOSPILL); /* If sub-DB still has entries, we're done 
*/ if (mc->mc_xcursor->mx_db.md_entries) { if (leaf->mn_flags & F_SUBDATA) { @@ -5852,6 +6344,7 @@ mdb_xcursor_init1(MDB_cursor *mc, MDB_node *node) static void mdb_cursor_init(MDB_cursor *mc, MDB_txn *txn, MDB_dbi dbi, MDB_xcursor *mx) { + mc->mc_next = NULL; mc->mc_backup = NULL; mc->mc_dbi = dbi; mc->mc_txn = txn; @@ -6641,6 +7134,7 @@ mdb_del(MDB_txn *txn, MDB_dbi dbi, * run out of space, triggering a split. We need this * cursor to be consistent until the end of the rebalance. */ + mc.mc_flags |= C_UNTRACK; mc.mc_next = txn->mt_cursors[dbi]; txn->mt_cursors[dbi] = &mc; rc = mdb_cursor_del(&mc, data ? 0 : MDB_NODUPDATA); @@ -7025,7 +7519,7 @@ done: m3 = m2; if (m3 == mc) continue; - if (!(m3->mc_flags & C_INITIALIZED)) + if (!(m2->mc_flags & m3->mc_flags & C_INITIALIZED)) continue; if (m3->mc_flags & C_SPLITTING) continue; @@ -7321,6 +7815,15 @@ void mdb_dbi_close(MDB_env *env, MDB_dbi dbi) free(ptr); } +int mdb_dbi_flags(MDB_env *env, MDB_dbi dbi, unsigned int *flags) +{ + /* We could return the flags for the FREE_DBI too but what's the point? */ + if (dbi <= MAIN_DBI || dbi >= env->me_numdbs) + return EINVAL; + *flags = env->me_dbflags[dbi]; + return MDB_SUCCESS; +} + /** Add all the DB's pages to the free list. * @param[in] mc Cursor on the DB to free. * @param[in] subs non-Zero to check for sub-DBs in this DB. 
@@ -7485,4 +7988,125 @@ int mdb_set_relctx(MDB_txn *txn, MDB_dbi dbi, void *ctx) return MDB_SUCCESS; } +int mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx) +{ + unsigned int i, rdrs; + MDB_reader *mr; + char buf[64]; + int first = 1; + + if (!env || !func) + return -1; + if (!env->me_txns) { + return func("(no reader locks)\n", ctx); + } + rdrs = env->me_txns->mti_numreaders; + mr = env->me_txns->mti_readers; + for (i=0; i> 1; + cursor = base + pivot + 1; + val = pid - ids[cursor]; + + if( val < 0 ) { + n = pivot; + + } else if ( val > 0 ) { + base = cursor; + n -= pivot + 1; + + } else { + /* found, so it's a duplicate */ + return -1; + } + } + + if( val > 0 ) { + ++cursor; + } + ids[0]++; + for (n = ids[0]; n > cursor; n--) + ids[n] = ids[n-1]; + ids[n] = pid; + return 0; +} + +int mdb_reader_check(MDB_env *env, int *dead) +{ + unsigned int i, j, rdrs; + MDB_reader *mr; + pid_t *pids, pid; + int count = 0; + + if (!env) + return EINVAL; + if (dead) + *dead = 0; + if (!env->me_txns) + return MDB_SUCCESS; + rdrs = env->me_txns->mti_numreaders; + pids = malloc((rdrs+1) * sizeof(pid_t)); + if (!pids) + return ENOMEM; + pids[0] = 0; + mr = env->me_txns->mti_readers; + j = 0; + for (i=0; ime_pid) { + pid = mr[i].mr_pid; + if (mdb_pid_insert(pids, pid) == 0) { + if (mdb_reader_pid(env, Pidcheck, pid)) { + LOCK_MUTEX_R(env); + if (mdb_reader_pid(env, Pidcheck, pid)) { + for (j=i; j 1) { + int dead; + mdb_reader_check(env, &dead); + printf(" %d stale readers cleared.\n", dead); + rc = mdb_reader_list(env, (MDB_msg_func *)fputs, stdout); + } + if (!(subname || alldbs || freinfo)) + goto env_close; + } + + rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn); + if (rc) { + printf("mdb_txn_begin failed, error %d %s\n", rc, mdb_strerror(rc)); + goto env_close; + } + if (freinfo) { MDB_cursor *cursor; MDB_val key, data; diff --git a/libraries/liblmdb/midl.c b/Subtrees/mdb/libraries/liblmdb/midl.c similarity index 99% rename from libraries/liblmdb/midl.c rename to 
Subtrees/mdb/libraries/liblmdb/midl.c index e7bd680cb0..86e4592d2d 100644 --- a/libraries/liblmdb/midl.c +++ b/Subtrees/mdb/libraries/liblmdb/midl.c @@ -31,8 +31,7 @@ */ #define CMP(x,y) ( (x) < (y) ? -1 : (x) > (y) ) -#if 0 /* superseded by append/sort */ -static unsigned mdb_midl_search( MDB_IDL ids, MDB_ID id ) +unsigned mdb_midl_search( MDB_IDL ids, MDB_ID id ) { /* * binary search of id in ids @@ -67,6 +66,7 @@ static unsigned mdb_midl_search( MDB_IDL ids, MDB_ID id ) return cursor; } +#if 0 /* superseded by append/sort */ int mdb_midl_insert( MDB_IDL ids, MDB_ID id ) { unsigned x, i; diff --git a/libraries/liblmdb/midl.h b/Subtrees/mdb/libraries/liblmdb/midl.h similarity index 94% rename from libraries/liblmdb/midl.h rename to Subtrees/mdb/libraries/liblmdb/midl.h index 9ce7133c6e..b0bdff3f49 100644 --- a/libraries/liblmdb/midl.h +++ b/Subtrees/mdb/libraries/liblmdb/midl.h @@ -74,14 +74,12 @@ typedef MDB_ID *MDB_IDL; xidl[xlen] = (id); \ } while (0) -#if 0 /* superseded by append/sort */ - /** Insert an ID into an IDL. - * @param[in,out] ids The IDL to insert into. - * @param[in] id The ID to insert. - * @return 0 on success, -1 if ID was already present, -2 on error. + /** Search for an ID in an IDL. + * @param[in] ids The IDL to search. + * @param[in] id The ID to search for. + * @return The index of the first ID greater than or equal to \b id. */ -int mdb_midl_insert( MDB_IDL ids, MDB_ID id ); -#endif +unsigned mdb_midl_search( MDB_IDL ids, MDB_ID id ); /** Allocate an IDL. * Allocates memory for an IDL of the given size. 
diff --git a/libraries/liblmdb/mtest.c b/Subtrees/mdb/libraries/liblmdb/mtest.c similarity index 100% rename from libraries/liblmdb/mtest.c rename to Subtrees/mdb/libraries/liblmdb/mtest.c diff --git a/libraries/liblmdb/mtest2.c b/Subtrees/mdb/libraries/liblmdb/mtest2.c similarity index 100% rename from libraries/liblmdb/mtest2.c rename to Subtrees/mdb/libraries/liblmdb/mtest2.c diff --git a/libraries/liblmdb/mtest3.c b/Subtrees/mdb/libraries/liblmdb/mtest3.c similarity index 100% rename from libraries/liblmdb/mtest3.c rename to Subtrees/mdb/libraries/liblmdb/mtest3.c diff --git a/libraries/liblmdb/mtest4.c b/Subtrees/mdb/libraries/liblmdb/mtest4.c similarity index 100% rename from libraries/liblmdb/mtest4.c rename to Subtrees/mdb/libraries/liblmdb/mtest4.c diff --git a/libraries/liblmdb/mtest5.c b/Subtrees/mdb/libraries/liblmdb/mtest5.c similarity index 100% rename from libraries/liblmdb/mtest5.c rename to Subtrees/mdb/libraries/liblmdb/mtest5.c diff --git a/libraries/liblmdb/mtest6.c b/Subtrees/mdb/libraries/liblmdb/mtest6.c similarity index 100% rename from libraries/liblmdb/mtest6.c rename to Subtrees/mdb/libraries/liblmdb/mtest6.c diff --git a/libraries/liblmdb/sample-bdb.c b/Subtrees/mdb/libraries/liblmdb/sample-bdb.c similarity index 100% rename from libraries/liblmdb/sample-bdb.c rename to Subtrees/mdb/libraries/liblmdb/sample-bdb.c diff --git a/libraries/liblmdb/sample-mdb.c b/Subtrees/mdb/libraries/liblmdb/sample-mdb.c similarity index 100% rename from libraries/liblmdb/sample-mdb.c rename to Subtrees/mdb/libraries/liblmdb/sample-mdb.c diff --git a/TODO.txt b/TODO.txt index 908475b5f3..69e5eda3da 100644 --- a/TODO.txt +++ b/TODO.txt @@ -2,16 +2,40 @@ RIPPLE TODO -------------------------------------------------------------------------------- -- Examples for different backend key/value config settings +Items marked '*' can be handled by third parties. 
-- Unit Test attention -- NodeStore backend unit test - -- Validations unit test +Vinnie's Short List (Changes day to day) +- Make theConfig a SharedSingleton to prevent leak warnings +- Add fast backend to the unit test +- Refactor Section code into ConfigFile +- Change NodeStore config file format to multiline key/value pairs +- Improved Mutex to track deadlocks +- Memory NodeStore::Backend for unit tests [*] +- Finish unit tests and code for Validators +- Import beast::db and use it in SQliteBackend +- Convert some Ripple boost unit tests to Beast. [*] +- Move all code into modules/ +- Work on KeyvaDB +[*] These can be handled by external developers -------------------------------------------------------------------------------- +- Raise the warning level and fix everything + +* Restyle all the macros in ripple_ConfigSection.h + +* Replace all throw with beast::Throw + Only in the ripple sources, not in Subtrees/ or protobuf or websocket + +- Replace base_uint and uintXXX with UnsignedInteger + * Need to specialize UnsignedInteger to work efficiently with 4 and 8 byte + multiples of the size. + +- Rewrite boost program_options in Beast + +- Validations unit test + - Replace endian conversion calls with beast calls: htobe32, be32toh, ntohl, etc... 
Start by removing the system headers which provide these routines, if possible @@ -118,8 +142,6 @@ RIPPLE TODO - Make LevelDB and Ripple code work with both Unicode and non-Unicode Windows APIs -- Raise the warning level and fix everything - - Go searching through VFALCO notes and fix everything - Deal with function-level statics used for SqliteDatabase (like in diff --git a/modules/ripple_app/data/ripple_DBInit.cpp b/modules/ripple_app/data/ripple_DBInit.cpp index 8639fe035b..a2d6ba1a7d 100644 --- a/modules/ripple_app/data/ripple_DBInit.cpp +++ b/modules/ripple_app/data/ripple_DBInit.cpp @@ -283,32 +283,15 @@ const char* WalletDBInit[] = int WalletDBCount = NUMBER (WalletDBInit); // Hash node database holds nodes indexed by hash -const char* HashNodeDBInit[] = -{ - "PRAGMA synchronous=NORMAL;", - "PRAGMA journal_mode=WAL;", - "PRAGMA journal_size_limit=1582080;", - -#if (ULONG_MAX > UINT_MAX) && !defined (NO_SQLITE_MMAP) - "PRAGMA mmap_size=171798691840;", -#endif - - "BEGIN TRANSACTION;", - - "CREATE TABLE CommittedObjects ( \ - Hash CHARACTER(64) PRIMARY KEY, \ - ObjType CHAR(1) NOT NULL, \ - LedgerIndex BIGINT UNSIGNED, \ - Object BLOB \ - );", - - "END TRANSACTION;" -}; +// VFALCO TODO Remove this since it looks unused +/* int HashNodeDBCount = NUMBER (HashNodeDBInit); +*/ // Net node database holds nodes seen on the network // XXX Not really used needs replacement. 
+/* const char* NetNodeDBInit[] = { "CREATE TABLE KnownNodes ( \ @@ -320,7 +303,10 @@ const char* NetNodeDBInit[] = }; int NetNodeDBCount = NUMBER (NetNodeDBInit); +*/ +// This appears to be unused +/* const char* PathFindDBInit[] = { "PRAGMA synchronous = OFF; ", @@ -353,5 +339,5 @@ const char* PathFindDBInit[] = }; int PathFindDBCount = NUMBER (PathFindDBInit); +*/ -// vim:ts=4 diff --git a/modules/ripple_app/data/ripple_DBInit.h b/modules/ripple_app/data/ripple_DBInit.h index d6111f9612..489b511588 100644 --- a/modules/ripple_app/data/ripple_DBInit.h +++ b/modules/ripple_app/data/ripple_DBInit.h @@ -12,19 +12,11 @@ extern const char* RpcDBInit[]; extern const char* TxnDBInit[]; extern const char* LedgerDBInit[]; extern const char* WalletDBInit[]; -extern const char* HashNodeDBInit[]; // VFALCO TODO Figure out what these counts are for extern int RpcDBCount; extern int TxnDBCount; extern int LedgerDBCount; extern int WalletDBCount; -extern int HashNodeDBCount; - -// VFALCO TODO Seems these two aren't used so delete EVERYTHING. 
-extern const char* NetNodeDBInit[]; -extern const char* PathFindDBInit[]; -extern int NetNodeDBCount; -extern int PathFindDBCount; #endif diff --git a/modules/ripple_app/ledger/Ledger.cpp b/modules/ripple_app/ledger/Ledger.cpp index 54bbd48df9..3614fbbe07 100644 --- a/modules/ripple_app/ledger/Ledger.cpp +++ b/modules/ripple_app/ledger/Ledger.cpp @@ -529,10 +529,12 @@ void Ledger::saveAcceptedLedger (Job&, bool fromConsensus) assert (getTransHash () == mTransactionMap->getHash ()); // Save the ledger header in the hashed object store - Serializer s (128); - s.add32 (HashPrefix::ledgerMaster); - addRaw (s); - getApp().getNodeStore ().store (hotLEDGER, mLedgerSeq, s.peekData (), mHash); + { + Serializer s (128); + s.add32 (HashPrefix::ledgerMaster); + addRaw (s); + getApp().getNodeStore ().store (hotLEDGER, mLedgerSeq, s.modData (), mHash); + } AcceptedLedger::pointer aLedger = AcceptedLedger::makeAcceptedLedger (shared_from_this ()); diff --git a/modules/ripple_app/ledger/ripple_InboundLedger.cpp b/modules/ripple_app/ledger/ripple_InboundLedger.cpp index 30e8b3e0b3..40ef7c216a 100644 --- a/modules/ripple_app/ledger/ripple_InboundLedger.cpp +++ b/modules/ripple_app/ledger/ripple_InboundLedger.cpp @@ -48,7 +48,7 @@ bool InboundLedger::tryLocal () if (!mHaveBase) { // Nothing we can do without the ledger base - NodeObject::pointer node = getApp().getNodeStore ().retrieve (mHash); + NodeObject::pointer node = getApp().getNodeStore ().fetch (mHash); if (!node) { @@ -672,7 +672,7 @@ bool InboundLedger::takeBase (const std::string& data) // data must not have has Serializer s (data.size () + 4); s.add32 (HashPrefix::ledgerMaster); s.addRaw (data); - getApp().getNodeStore ().store (hotLEDGER, mLedger->getLedgerSeq (), s.peekData (), mHash); + getApp().getNodeStore ().store (hotLEDGER, mLedger->getLedgerSeq (), s.modData (), mHash); progress (); diff --git a/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.cpp 
b/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.cpp index 12a3892378..860a53b4b5 100644 --- a/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.cpp +++ b/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.cpp @@ -6,118 +6,208 @@ #if RIPPLE_HYPERLEVELDB_AVAILABLE -class HyperLevelDBBackendFactory::Backend : public NodeStore::Backend +class HyperLevelDBBackendFactory::Backend + : public NodeStore::Backend + , public NodeStore::BatchWriter::Callback + , LeakChecked { public: - Backend (StringPairArray const& keyValues) - : mName(keyValues ["path"].toStdString ()) - , mDB(NULL) + typedef RecycledObjectPool StringPool; + + Backend (size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler) + : m_keyBytes (keyBytes) + , m_scheduler (scheduler) + , m_batch (*this, scheduler) + , m_name (keyValues ["path"].toStdString ()) { - if (mName.empty()) - throw std::runtime_error ("Missing path in LevelDB backend"); + if (m_name.empty ()) + Throw (std::runtime_error ("Missing path in LevelDB backend")); hyperleveldb::Options options; options.create_if_missing = true; - if (keyValues["cache_mb"].isEmpty()) + if (keyValues ["cache_mb"].isEmpty ()) + { options.block_cache = hyperleveldb::NewLRUCache (theConfig.getSize (siHashNodeDBCache) * 1024 * 1024); + } else + { options.block_cache = hyperleveldb::NewLRUCache (keyValues["cache_mb"].getIntValue() * 1024L * 1024L); + } - if (keyValues["filter_bits"].isEmpty()) + if (keyValues ["filter_bits"].isEmpty()) { if (theConfig.NODE_SIZE >= 2) options.filter_policy = hyperleveldb::NewBloomFilterPolicy (10); } - else if (keyValues["filter_bits"].getIntValue() != 0) - options.filter_policy = hyperleveldb::NewBloomFilterPolicy (keyValues["filter_bits"].getIntValue()); + else if (keyValues ["filter_bits"].getIntValue() != 0) + { + options.filter_policy = hyperleveldb::NewBloomFilterPolicy (keyValues ["filter_bits"].getIntValue ()); + } - if (!keyValues["open_files"].isEmpty()) - 
options.max_open_files = keyValues["open_files"].getIntValue(); + if (! keyValues["open_files"].isEmpty ()) + { + options.max_open_files = keyValues ["open_files"].getIntValue(); + } - hyperleveldb::Status status = hyperleveldb::DB::Open (options, mName, &mDB); - if (!status.ok () || !mDB) - throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString())); + hyperleveldb::DB* db = nullptr; + hyperleveldb::Status status = hyperleveldb::DB::Open (options, m_name, &db); + if (!status.ok () || !db) + Throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString())); + + m_db = db; } ~Backend () { - delete mDB; } - std::string getDataBaseName() + std::string getName() { - return mName; + return m_name; } - bool bulkStore (const std::vector< NodeObject::pointer >& objs) - { - hyperleveldb::WriteBatch batch; + //-------------------------------------------------------------------------- + + Status fetch (void const* key, NodeObject::Ptr* pObject) + { + pObject->reset (); + + Status status (ok); + + hyperleveldb::ReadOptions const options; + hyperleveldb::Slice const slice (static_cast (key), m_keyBytes); - BOOST_FOREACH (NodeObject::ref obj, objs) { - Blob blob (toBlob (obj)); - batch.Put ( - hyperleveldb::Slice (reinterpret_cast(obj->getHash ().begin ()), 256 / 8), - hyperleveldb::Slice (reinterpret_cast(&blob.front ()), blob.size ())); + // These are reused std::string objects, + // required for leveldb's funky interface. + // + StringPool::ScopedItem item (m_stringPool); + std::string& string = item.getObject (); + + hyperleveldb::Status getStatus = m_db->Get (options, slice, &string); + + if (getStatus.ok ()) + { + NodeStore::DecodedBlob decoded (key, string.data (), string.size ()); + + if (decoded.wasOk ()) + { + *pObject = decoded.createObject (); + } + else + { + // Decoding failed, probably corrupted! 
+ // + status = dataCorrupt; + } + } + else + { + if (getStatus.IsCorruption ()) + { + status = dataCorrupt; + } + else if (getStatus.IsNotFound ()) + { + status = notFound; + } + else + { + status = unknown; + } + } } - return mDB->Write (hyperleveldb::WriteOptions (), &batch).ok (); + + return status; } - NodeObject::pointer retrieve (uint256 const& hash) + void store (NodeObject::ref object) { - std::string sData; - if (!mDB->Get (hyperleveldb::ReadOptions (), - hyperleveldb::Slice (reinterpret_cast(hash.begin ()), 256 / 8), &sData).ok ()) + m_batch.store (object); + } + + void storeBatch (NodeStore::Batch const& batch) + { + hyperleveldb::WriteBatch wb; + { - return NodeObject::pointer(); + NodeStore::EncodedBlob::Pool::ScopedItem item (m_blobPool); + + BOOST_FOREACH (NodeObject::ref object, batch) + { + item.getObject ().prepare (object); + + wb.Put ( + hyperleveldb::Slice (reinterpret_cast ( + item.getObject ().getKey ()), m_keyBytes), + hyperleveldb::Slice (reinterpret_cast ( + item.getObject ().getData ()), item.getObject ().getSize ())); + } } - return fromBinary(hash, &sData[0], sData.size ()); + + hyperleveldb::WriteOptions const options; + + m_db->Write (options, &wb).ok (); } - void visitAll (FUNCTION_TYPE func) + void visitAll (VisitCallback& callback) { - hyperleveldb::Iterator* it = mDB->NewIterator (hyperleveldb::ReadOptions ()); + hyperleveldb::ReadOptions const options; + + ScopedPointer it (m_db->NewIterator (options)); + for (it->SeekToFirst (); it->Valid (); it->Next ()) { - if (it->key ().size () == 256 / 8) + if (it->key ().size () == m_keyBytes) { - uint256 hash; - memcpy(hash.begin(), it->key ().data(), 256 / 8); - func (fromBinary (hash, it->value ().data (), it->value ().size ())); + NodeStore::DecodedBlob decoded (it->key ().data (), + it->value ().data (), + it->value ().size ()); + + if (decoded.wasOk ()) + { + NodeObject::Ptr object (decoded.createObject ()); + + callback.visitObject (object); + } + else + { + // Uh oh, corrupted 
data! + WriteLog (lsFATAL, NodeObject) << "Corrupt NodeObject #" << uint256 (it->key ().data ()); + } + } + else + { + // VFALCO NOTE What does it mean to find an + // incorrectly sized key? Corruption? + WriteLog (lsFATAL, NodeObject) << "Bad key size = " << it->key ().size (); } } } - Blob toBlob(NodeObject::ref obj) + int getWriteLoad () { - Blob rawData (9 + obj->getData ().size ()); - unsigned char* bufPtr = &rawData.front(); - - *reinterpret_cast (bufPtr + 0) = ntohl (obj->getIndex ()); - *reinterpret_cast (bufPtr + 4) = ntohl (obj->getIndex ()); - * (bufPtr + 8) = static_cast (obj->getType ()); - memcpy (bufPtr + 9, &obj->getData ().front (), obj->getData ().size ()); - - return rawData; + return m_batch.getWriteLoad (); } - NodeObject::pointer fromBinary(uint256 const& hash, - char const* data, int size) + //-------------------------------------------------------------------------- + + void writeBatch (NodeStore::Batch const& batch) { - if (size < 9) - throw std::runtime_error ("undersized object"); - - uint32 index = htonl (*reinterpret_cast (data)); - int htype = data[8]; - - return boost::make_shared (static_cast (htype), index, - data + 9, size - 9, hash); + storeBatch (batch); } private: - std::string mName; - hyperleveldb::DB* mDB; + size_t const m_keyBytes; + NodeStore::Scheduler& m_scheduler; + NodeStore::BatchWriter m_batch; + StringPool m_stringPool; + NodeStore::EncodedBlob::Pool m_blobPool; + std::string m_name; + ScopedPointer m_db; }; //------------------------------------------------------------------------------ @@ -142,9 +232,12 @@ String HyperLevelDBBackendFactory::getName () const return "HyperLevelDB"; } -NodeStore::Backend* HyperLevelDBBackendFactory::createInstance (StringPairArray const& keyValues) +NodeStore::Backend* HyperLevelDBBackendFactory::createInstance ( + size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler) { - return new HyperLevelDBBackendFactory::Backend (keyValues); + return new 
HyperLevelDBBackendFactory::Backend (keyBytes, keyValues, scheduler); } //------------------------------------------------------------------------------ diff --git a/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.h b/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.h index 1b44e4f9d1..43920477d8 100644 --- a/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.h +++ b/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.h @@ -23,7 +23,10 @@ public: static HyperLevelDBBackendFactory& getInstance (); String getName () const; - NodeStore::Backend* createInstance (StringPairArray const& keyValues); + + NodeStore::Backend* createInstance (size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler); }; #endif diff --git a/modules/ripple_app/node/ripple_KeyvaDBBackendFactory.cpp b/modules/ripple_app/node/ripple_KeyvaDBBackendFactory.cpp new file mode 100644 index 0000000000..8b08c87d41 --- /dev/null +++ b/modules/ripple_app/node/ripple_KeyvaDBBackendFactory.cpp @@ -0,0 +1,179 @@ +//------------------------------------------------------------------------------ +/* + Copyright (c) 2011-2013, OpenCoin, Inc. 
+*/ +//============================================================================== + +class KeyvaDBBackendFactory::Backend : public NodeStore::Backend +{ +private: + typedef RecycledObjectPool MemoryPool; + typedef RecycledObjectPool EncodedBlobPool; + +public: + Backend (size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler) + : m_keyBytes (keyBytes) + , m_scheduler (scheduler) + , m_path (keyValues ["path"]) + , m_db (KeyvaDB::New ( + keyBytes, + 3, + File::getCurrentWorkingDirectory().getChildFile (m_path).withFileExtension ("key"), + File::getCurrentWorkingDirectory().getChildFile (m_path).withFileExtension ("val"))) + { + } + + ~Backend () + { + } + + std::string getName () + { + return m_path.toStdString (); + } + + //-------------------------------------------------------------------------- + + Status fetch (void const* key, NodeObject::Ptr* pObject) + { + pObject->reset (); + + Status status (ok); + + struct Callback : KeyvaDB::GetCallback + { + explicit Callback (MemoryBlock& block) + : m_block (block) + { + } + + void* getStorageForValue (int valueBytes) + { + m_size = valueBytes; + m_block.ensureSize (valueBytes); + + return m_block.getData (); + } + + void const* getData () const noexcept + { + return m_block.getData (); + } + + size_t getSize () const noexcept + { + return m_size; + } + + private: + MemoryBlock& m_block; + size_t m_size; + }; + + MemoryPool::ScopedItem item (m_memoryPool); + MemoryBlock& block (item.getObject ()); + + Callback cb (block); + + // VFALCO TODO Can't we get KeyvaDB to provide a proper status? 
+ // + bool const found = m_db->get (key, &cb); + + if (found) + { + NodeStore::DecodedBlob decoded (key, cb.getData (), cb.getSize ()); + + if (decoded.wasOk ()) + { + *pObject = decoded.createObject (); + + status = ok; + } + else + { + status = dataCorrupt; + } + } + else + { + status = notFound; + } + + return status; + } + + void store (NodeObject::ref object) + { + EncodedBlobPool::ScopedItem item (m_blobPool); + NodeStore::EncodedBlob& encoded (item.getObject ()); + + encoded.prepare (object); + + m_db->put (encoded.getKey (), encoded.getData (), encoded.getSize ()); + } + + void storeBatch (NodeStore::Batch const& batch) + { + for (int i = 0; i < batch.size (); ++i) + store (batch [i]); + } + + void visitAll (VisitCallback& callback) + { + // VFALCO TODO Implement this! + // + bassertfalse; + //m_db->visitAll (); + } + + int getWriteLoad () + { + // we dont do pending writes + return 0; + } + + //-------------------------------------------------------------------------- + +private: + size_t const m_keyBytes; + NodeStore::Scheduler& m_scheduler; + String m_path; + ScopedPointer m_db; + MemoryPool m_memoryPool; + EncodedBlobPool m_blobPool; +}; + +//------------------------------------------------------------------------------ + +KeyvaDBBackendFactory::KeyvaDBBackendFactory () +{ +} + +KeyvaDBBackendFactory::~KeyvaDBBackendFactory () +{ +} + +KeyvaDBBackendFactory& KeyvaDBBackendFactory::getInstance () +{ + static KeyvaDBBackendFactory instance; + + return instance; +} + +String KeyvaDBBackendFactory::getName () const +{ + return "KeyvaDB"; +} + +NodeStore::Backend* KeyvaDBBackendFactory::createInstance ( + size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler) +{ + return new KeyvaDBBackendFactory::Backend (keyBytes, keyValues, scheduler); +} + +//------------------------------------------------------------------------------ + diff --git a/modules/ripple_app/node/ripple_KeyvaDBBackendFactory.h 
b/modules/ripple_app/node/ripple_KeyvaDBBackendFactory.h new file mode 100644 index 0000000000..40e76f1994 --- /dev/null +++ b/modules/ripple_app/node/ripple_KeyvaDBBackendFactory.h @@ -0,0 +1,30 @@ +//------------------------------------------------------------------------------ +/* + Copyright (c) 2011-2013, OpenCoin, Inc. +*/ +//============================================================================== + +#ifndef RIPPLE_KEYVABACKENDFACTORY_H_INCLUDED +#define RIPPLE_KEYVABACKENDFACTORY_H_INCLUDED + +/** Factory to produce KeyvaDB backends for the NodeStore. +*/ +class KeyvaDBBackendFactory : public NodeStore::BackendFactory +{ +private: + class Backend; + + KeyvaDBBackendFactory (); + ~KeyvaDBBackendFactory (); + +public: + static KeyvaDBBackendFactory& getInstance (); + + String getName () const; + + NodeStore::Backend* createInstance (size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler); +}; + +#endif diff --git a/modules/ripple_app/node/ripple_LevelDBBackendFactory.cpp b/modules/ripple_app/node/ripple_LevelDBBackendFactory.cpp index b00fd0f287..0beb2d5c1b 100644 --- a/modules/ripple_app/node/ripple_LevelDBBackendFactory.cpp +++ b/modules/ripple_app/node/ripple_LevelDBBackendFactory.cpp @@ -4,23 +4,38 @@ */ //============================================================================== -class LevelDBBackendFactory::Backend : public NodeStore::Backend +class LevelDBBackendFactory::Backend + : public NodeStore::Backend + , public NodeStore::BatchWriter::Callback + , LeakChecked { public: - Backend (StringPairArray const& keyValues) - : mName(keyValues ["path"].toStdString ()) - , mDB(NULL) + typedef RecycledObjectPool StringPool; + + //-------------------------------------------------------------------------- + + Backend (int keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler) + : m_keyBytes (keyBytes) + , m_scheduler (scheduler) + , m_batch (*this, scheduler) + , m_name (keyValues 
["path"].toStdString ()) { - if (mName.empty()) - throw std::runtime_error ("Missing path in LevelDB backend"); + if (m_name.empty()) + Throw (std::runtime_error ("Missing path in LevelDB backend")); leveldb::Options options; options.create_if_missing = true; if (keyValues["cache_mb"].isEmpty()) + { options.block_cache = leveldb::NewLRUCache (theConfig.getSize (siHashNodeDBCache) * 1024 * 1024); + } else + { options.block_cache = leveldb::NewLRUCache (keyValues["cache_mb"].getIntValue() * 1024L * 1024L); + } if (keyValues["filter_bits"].isEmpty()) { @@ -28,94 +43,171 @@ public: options.filter_policy = leveldb::NewBloomFilterPolicy (10); } else if (keyValues["filter_bits"].getIntValue() != 0) + { options.filter_policy = leveldb::NewBloomFilterPolicy (keyValues["filter_bits"].getIntValue()); + } - if (!keyValues["open_files"].isEmpty()) + if (! keyValues["open_files"].isEmpty()) + { options.max_open_files = keyValues["open_files"].getIntValue(); + } - leveldb::Status status = leveldb::DB::Open (options, mName, &mDB); - if (!status.ok () || !mDB) - throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString())); + leveldb::DB* db = nullptr; + leveldb::Status status = leveldb::DB::Open (options, m_name, &db); + if (!status.ok () || !db) + Throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString())); + + m_db = db; } ~Backend () { - delete mDB; } - std::string getDataBaseName() + std::string getName() { - return mName; + return m_name; } - bool bulkStore (const std::vector< NodeObject::pointer >& objs) - { - leveldb::WriteBatch batch; + //-------------------------------------------------------------------------- + + Status fetch (void const* key, NodeObject::Ptr* pObject) + { + pObject->reset (); + + Status status (ok); + + leveldb::ReadOptions const options; + leveldb::Slice const slice (static_cast (key), m_keyBytes); - BOOST_FOREACH (NodeObject::ref obj, objs) { - Blob blob (toBlob (obj)); - batch.Put 
( - leveldb::Slice (reinterpret_cast(obj->getHash ().begin ()), 256 / 8), - leveldb::Slice (reinterpret_cast(&blob.front ()), blob.size ())); + // These are reused std::string objects, + // required for leveldb's funky interface. + // + StringPool::ScopedItem item (m_stringPool); + std::string& string = item.getObject (); + + leveldb::Status getStatus = m_db->Get (options, slice, &string); + + if (getStatus.ok ()) + { + NodeStore::DecodedBlob decoded (key, string.data (), string.size ()); + + if (decoded.wasOk ()) + { + *pObject = decoded.createObject (); + } + else + { + // Decoding failed, probably corrupted! + // + status = dataCorrupt; + } + } + else + { + if (getStatus.IsCorruption ()) + { + status = dataCorrupt; + } + else if (getStatus.IsNotFound ()) + { + status = notFound; + } + else + { + status = unknown; + } + } } - return mDB->Write (leveldb::WriteOptions (), &batch).ok (); + + return status; } - NodeObject::pointer retrieve (uint256 const& hash) + void store (NodeObject::ref object) { - std::string sData; - if (!mDB->Get (leveldb::ReadOptions (), - leveldb::Slice (reinterpret_cast(hash.begin ()), 256 / 8), &sData).ok ()) + m_batch.store (object); + } + + void storeBatch (NodeStore::Batch const& batch) + { + leveldb::WriteBatch wb; + { - return NodeObject::pointer(); + NodeStore::EncodedBlob::Pool::ScopedItem item (m_blobPool); + + BOOST_FOREACH (NodeObject::ref object, batch) + { + item.getObject ().prepare (object); + + wb.Put ( + leveldb::Slice (reinterpret_cast (item.getObject ().getKey ()), + m_keyBytes), + leveldb::Slice (reinterpret_cast (item.getObject ().getData ()), + item.getObject ().getSize ())); + } } - return fromBinary(hash, &sData[0], sData.size ()); + + leveldb::WriteOptions const options; + + m_db->Write (options, &wb).ok (); } - void visitAll (FUNCTION_TYPE func) + void visitAll (VisitCallback& callback) { - leveldb::Iterator* it = mDB->NewIterator (leveldb::ReadOptions ()); + leveldb::ReadOptions const options; + + ScopedPointer it 
(m_db->NewIterator (options)); + for (it->SeekToFirst (); it->Valid (); it->Next ()) { - if (it->key ().size () == 256 / 8) + if (it->key ().size () == m_keyBytes) { - uint256 hash; - memcpy(hash.begin(), it->key ().data(), 256 / 8); - func (fromBinary (hash, it->value ().data (), it->value ().size ())); + NodeStore::DecodedBlob decoded (it->key ().data (), + it->value ().data (), + it->value ().size ()); + + if (decoded.wasOk ()) + { + NodeObject::Ptr object (decoded.createObject ()); + + callback.visitObject (object); + } + else + { + // Uh oh, corrupted data! + WriteLog (lsFATAL, NodeObject) << "Corrupt NodeObject #" << uint256 (it->key ().data ()); + } + } + else + { + // VFALCO NOTE What does it mean to find an + // incorrectly sized key? Corruption? + WriteLog (lsFATAL, NodeObject) << "Bad key size = " << it->key ().size (); } } } - Blob toBlob(NodeObject::ref obj) + int getWriteLoad () { - Blob rawData (9 + obj->getData ().size ()); - unsigned char* bufPtr = &rawData.front(); - - *reinterpret_cast (bufPtr + 0) = ntohl (obj->getIndex ()); - *reinterpret_cast (bufPtr + 4) = ntohl (obj->getIndex ()); - * (bufPtr + 8) = static_cast (obj->getType ()); - memcpy (bufPtr + 9, &obj->getData ().front (), obj->getData ().size ()); - - return rawData; + return m_batch.getWriteLoad (); } - NodeObject::pointer fromBinary(uint256 const& hash, - char const* data, int size) + //-------------------------------------------------------------------------- + + void writeBatch (NodeStore::Batch const& batch) { - if (size < 9) - throw std::runtime_error ("undersized object"); - - uint32 index = htonl (*reinterpret_cast (data)); - int htype = data[8]; - - return boost::make_shared (static_cast (htype), index, - data + 9, size - 9, hash); + storeBatch (batch); } private: - std::string mName; - leveldb::DB* mDB; + size_t const m_keyBytes; + NodeStore::Scheduler& m_scheduler; + NodeStore::BatchWriter m_batch; + StringPool m_stringPool; + NodeStore::EncodedBlob::Pool m_blobPool; + 
std::string m_name; + ScopedPointer m_db; }; //------------------------------------------------------------------------------ @@ -140,9 +232,12 @@ String LevelDBBackendFactory::getName () const return "LevelDB"; } -NodeStore::Backend* LevelDBBackendFactory::createInstance (StringPairArray const& keyValues) +NodeStore::Backend* LevelDBBackendFactory::createInstance ( + size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler) { - return new LevelDBBackendFactory::Backend (keyValues); + return new LevelDBBackendFactory::Backend (keyBytes, keyValues, scheduler); } //------------------------------------------------------------------------------ diff --git a/modules/ripple_app/node/ripple_LevelDBBackendFactory.h b/modules/ripple_app/node/ripple_LevelDBBackendFactory.h index b2f324f927..3646125d1d 100644 --- a/modules/ripple_app/node/ripple_LevelDBBackendFactory.h +++ b/modules/ripple_app/node/ripple_LevelDBBackendFactory.h @@ -21,7 +21,10 @@ public: static LevelDBBackendFactory& getInstance (); String getName () const; - NodeStore::Backend* createInstance (StringPairArray const& keyValues); + + NodeStore::Backend* createInstance (size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler); }; #endif diff --git a/modules/ripple_app/node/ripple_MdbBackendFactory.cpp b/modules/ripple_app/node/ripple_MdbBackendFactory.cpp index 0b74349ab3..c454380f8f 100644 --- a/modules/ripple_app/node/ripple_MdbBackendFactory.cpp +++ b/modules/ripple_app/node/ripple_MdbBackendFactory.cpp @@ -6,171 +6,242 @@ #if RIPPLE_MDB_AVAILABLE -class MdbBackendFactory::Backend : public NodeStore::Backend +class MdbBackendFactory::Backend + : public NodeStore::Backend + , public NodeStore::BatchWriter::Callback + , LeakChecked { public: - explicit Backend (StringPairArray const& keyValues) - : m_env (nullptr) + typedef NodeStore::Batch Batch; + typedef NodeStore::EncodedBlob EncodedBlob; + typedef NodeStore::DecodedBlob DecodedBlob; + + 
explicit Backend (size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler) + : m_keyBytes (keyBytes) + , m_scheduler (scheduler) + , m_batch (*this, scheduler) + , m_env (nullptr) { - if (keyValues ["path"].isEmpty ()) - throw std::runtime_error ("Missing path in MDB backend"); + String path (keyValues ["path"]); - int error = 0; + if (path.isEmpty ()) + Throw (std::runtime_error ("Missing path in MDB backend")); - error = mdb_env_create (&m_env); + m_basePath = path.toStdString(); - if (error == 0) // Should use the size of the file plus the free space on the disk - error = mdb_env_set_mapsize(m_env, 512L * 1024L * 1024L * 1024L); + // Regarding the path supplied to mdb_env_open: + // This directory must already exist and be writable. + // + File dir (File::getCurrentWorkingDirectory().getChildFile (path)); + Result result = dir.createDirectory (); - if (error == 0) - error = mdb_env_open ( - m_env, - keyValues ["path"].toStdString().c_str (), - MDB_NOTLS, - 0664); + if (result.wasOk ()) + { + int error = mdb_env_create (&m_env); - MDB_txn * txn; - if (error == 0) - error = mdb_txn_begin(m_env, NULL, 0, &txn); - if (error == 0) - error = mdb_dbi_open(txn, NULL, 0, &m_dbi); - if (error == 0) - error = mdb_txn_commit(txn); + // Should use the size of the file plus the free space on the disk + if (error == 0) + error = mdb_env_set_mapsize (m_env, 512L * 1024L * 1024L * 1024L); + if (error == 0) + error = mdb_env_open ( + m_env, + m_basePath.c_str (), + MDB_NOTLS, + 0664); - if (error != 0) + MDB_txn* txn; + + if (error == 0) + error = mdb_txn_begin (m_env, NULL, 0, &txn); + + if (error == 0) + error = mdb_dbi_open (txn, NULL, 0, &m_dbi); + + if (error == 0) + error = mdb_txn_commit (txn); + + if (error != 0) + { + String s; + s << "Error #" << error << " creating mdb environment"; + Throw (std::runtime_error (s.toStdString ())); + } + } + else { String s; - s << "Error #" << error << " creating mdb environment"; - throw 
std::runtime_error (s.toStdString ()); + s << "MDB Backend failed to create directory, " << result.getErrorMessage (); + Throw (std::runtime_error (s.toStdString().c_str())); } - m_name = keyValues ["path"].toStdString(); } ~Backend () { if (m_env != nullptr) { - mdb_dbi_close(m_env, m_dbi); + mdb_dbi_close (m_env, m_dbi); mdb_env_close (m_env); } } - std::string getDataBaseName() + std::string getName() { - return m_name; + return m_basePath; } - bool bulkStore (std::vector const& objs) + //-------------------------------------------------------------------------- + + template + unsigned char* mdb_cast (T* p) { - MDB_txn *txn = nullptr; - int rc = 0; + return const_cast (static_cast (p)); + } - rc = mdb_txn_begin(m_env, NULL, 0, &txn); + Status fetch (void const* key, NodeObject::Ptr* pObject) + { + pObject->reset (); - if (rc == 0) + Status status (ok); + + MDB_txn* txn = nullptr; + + int error = 0; + + error = mdb_txn_begin (m_env, NULL, MDB_RDONLY, &txn); + + if (error == 0) { - BOOST_FOREACH (NodeObject::ref obj, objs) - { - MDB_val key, data; - Blob blob (toBlob (obj)); + MDB_val dbkey; + MDB_val data; - key.mv_size = (256 / 8); - key.mv_data = const_cast(obj->getHash().begin()); + dbkey.mv_size = m_keyBytes; + dbkey.mv_data = mdb_cast (key); - data.mv_size = blob.size(); - data.mv_data = &blob.front(); + error = mdb_get (txn, m_dbi, &dbkey, &data); - rc = mdb_put(txn, m_dbi, &key, &data, 0); - if (rc != 0) + if (error == 0) + { + DecodedBlob decoded (key, data.mv_data, data.mv_size); + + if (decoded.wasOk ()) { - assert(false); + *pObject = decoded.createObject (); + } + else + { + status = dataCorrupt; + } + } + else if (error == MDB_NOTFOUND) + { + status = notFound; + } + else + { + status = unknown; + + WriteLog (lsWARNING, NodeObject) << "MDB txn failed, code=" << error; + } + + mdb_txn_abort (txn); + } + else + { + status = unknown; + + WriteLog (lsWARNING, NodeObject) << "MDB txn failed, code=" << error; + } + + return status; + } + + void store 
(NodeObject::ref object) + { + m_batch.store (object); + } + + void storeBatch (Batch const& batch) + { + MDB_txn* txn = nullptr; + + int error = 0; + + error = mdb_txn_begin (m_env, NULL, 0, &txn); + + if (error == 0) + { + EncodedBlob::Pool::ScopedItem item (m_blobPool); + + BOOST_FOREACH (NodeObject::Ptr const& object, batch) + { + EncodedBlob& encoded (item.getObject ()); + + encoded.prepare (object); + + MDB_val key; + key.mv_size = m_keyBytes; + key.mv_data = mdb_cast (encoded.getKey ()); + + MDB_val data; + data.mv_size = encoded.getSize (); + data.mv_data = mdb_cast (encoded.getData ()); + + error = mdb_put (txn, m_dbi, &key, &data, 0); + + if (error != 0) + { + WriteLog (lsWARNING, NodeObject) << "mdb_put failed, error=" << error; break; } - } + } + + if (error == 0) + { + error = mdb_txn_commit(txn); + + if (error != 0) + { + WriteLog (lsWARNING, NodeObject) << "mdb_txn_commit failed, error=" << error; + } + } + else + { + mdb_txn_abort (txn); + } } else - assert(false); - - if (rc == 0) - rc = mdb_txn_commit(txn); - else if (txn) - mdb_txn_abort(txn); - - assert(rc == 0); - return rc == 0; - } - - NodeObject::pointer retrieve (uint256 const& hash) - { - NodeObject::pointer ret; - - MDB_txn *txn = nullptr; - int rc = 0; - - rc = mdb_txn_begin(m_env, NULL, MDB_RDONLY, &txn); - - if (rc == 0) { - MDB_val key, data; - - key.mv_size = (256 / 8); - key.mv_data = const_cast(hash.begin()); - - rc = mdb_get(txn, m_dbi, &key, &data); - if (rc == 0) - ret = fromBinary(hash, static_cast(data.mv_data), data.mv_size); - else - assert(rc == MDB_NOTFOUND); + WriteLog (lsWARNING, NodeObject) << "mdb_txn_begin failed, error=" << error; } - else - assert(false); - - mdb_txn_abort(txn); - - return ret; } - void visitAll (FUNCTION_TYPE func) - { // WRITEME - assert(false); - } - - Blob toBlob (NodeObject::ref obj) const + void visitAll (VisitCallback& callback) { - Blob rawData (9 + obj->getData ().size ()); - unsigned char* bufPtr = &rawData.front(); - - *reinterpret_cast 
(bufPtr + 0) = ntohl (obj->getIndex ()); - - *reinterpret_cast (bufPtr + 4) = ntohl (obj->getIndex ()); - - *(bufPtr + 8) = static_cast (obj->getType ()); - - memcpy (bufPtr + 9, &obj->getData ().front (), obj->getData ().size ()); - - return rawData; + // VFALCO TODO Implement this! + bassertfalse; } - NodeObject::pointer fromBinary (uint256 const& hash, char const* data, int size) const + int getWriteLoad () { - if (size < 9) - throw std::runtime_error ("undersized object"); + return m_batch.getWriteLoad (); + } - uint32 const index = htonl (*reinterpret_cast (data)); + //-------------------------------------------------------------------------- - int const htype = data [8]; - - return boost::make_shared ( - static_cast (htype), - index, - data + 9, - size - 9, - hash); + void writeBatch (Batch const& batch) + { + storeBatch (batch); } private: - std::string m_name; + size_t const m_keyBytes; + NodeStore::Scheduler& m_scheduler; + NodeStore::BatchWriter m_batch; + NodeStore::EncodedBlob::Pool m_blobPool; + std::string m_basePath; MDB_env* m_env; MDB_dbi m_dbi; }; @@ -197,9 +268,12 @@ String MdbBackendFactory::getName () const return "mdb"; } -NodeStore::Backend* MdbBackendFactory::createInstance (StringPairArray const& keyValues) +NodeStore::Backend* MdbBackendFactory::createInstance ( + size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler) { - return new MdbBackendFactory::Backend (keyValues); + return new MdbBackendFactory::Backend (keyBytes, keyValues, scheduler); } #endif diff --git a/modules/ripple_app/node/ripple_MdbBackendFactory.h b/modules/ripple_app/node/ripple_MdbBackendFactory.h index 702ca3a14a..2e1cd7db65 100644 --- a/modules/ripple_app/node/ripple_MdbBackendFactory.h +++ b/modules/ripple_app/node/ripple_MdbBackendFactory.h @@ -25,7 +25,10 @@ public: static MdbBackendFactory& getInstance (); String getName () const; - NodeStore::Backend* createInstance (StringPairArray const& keyValues); + + NodeStore::Backend* 
createInstance (size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler); }; #endif diff --git a/modules/ripple_app/node/ripple_NodeObject.cpp b/modules/ripple_app/node/ripple_NodeObject.cpp index ac8bce22ee..1d3f282762 100644 --- a/modules/ripple_app/node/ripple_NodeObject.cpp +++ b/modules/ripple_app/node/ripple_NodeObject.cpp @@ -6,30 +6,32 @@ SETUP_LOG (NodeObject) -NodeObject::NodeObject ( - NodeObjectType type, - LedgerIndex ledgerIndex, - Blob const& binaryDataToCopy, - uint256 const& hash) - : mType (type) - , mHash (hash) - , mLedgerIndex (ledgerIndex) - , mData (binaryDataToCopy) -{ -} +//------------------------------------------------------------------------------ NodeObject::NodeObject ( NodeObjectType type, LedgerIndex ledgerIndex, - void const* bufferToCopy, - int bytesInBuffer, - uint256 const& hash) + Blob& data, + uint256 const& hash, + PrivateAccess) : mType (type) , mHash (hash) , mLedgerIndex (ledgerIndex) - , mData (static_cast (bufferToCopy), - static_cast (bufferToCopy) + bytesInBuffer) { + // Take over the caller's buffer + mData.swap (data); +} + +NodeObject::Ptr NodeObject::createObject ( + NodeObjectType type, + LedgerIndex ledgerIndex, + Blob& data, + uint256 const & hash) +{ + // The boost::ref is important or + // else it will be passed by value! 
+ return boost::make_shared ( + type, ledgerIndex, boost::ref (data), hash, PrivateAccess ()); } NodeObjectType NodeObject::getType () const @@ -51,3 +53,39 @@ Blob const& NodeObject::getData () const { return mData; } + +bool NodeObject::isCloneOf (NodeObject::Ptr const& other) const +{ + if (mType != other->mType) + return false; + + if (mHash != other->mHash) + return false; + + if (mLedgerIndex != other->mLedgerIndex) + return false; + + if (mData != other->mData) + return false; + + return true; +} + +//------------------------------------------------------------------------------ + +class NodeObjectTests : public UnitTest +{ +public: + + NodeObjectTests () : UnitTest ("NodeObject", "ripple") + { + } + + + void runTest () + { + } +}; + +static NodeObjectTests nodeObjectTests; + diff --git a/modules/ripple_app/node/ripple_NodeObject.h b/modules/ripple_app/node/ripple_NodeObject.h index b889666f48..7bbf7dd584 100644 --- a/modules/ripple_app/node/ripple_NodeObject.h +++ b/modules/ripple_app/node/ripple_NodeObject.h @@ -34,27 +34,60 @@ class NodeObject : public CountedObject public: static char const* getCountedObjectName () { return "NodeObject"; } + enum + { + /** Size of the fixed keys, in bytes. + + We use a 256-bit hash for the keys. + + @see NodeObject + */ + keyBytes = 32, + }; + + /** The type used to hold the hash. + + The hahes are fixed size, SHA256. + + @note The key size can be retrieved with `Hash::sizeInBytes` + */ + typedef UnsignedInteger <32> Hash; + + // Please use this one. For a reference use Ptr const& + typedef boost::shared_ptr Ptr; + + // These are DEPRECATED, type names are capitalized. typedef boost::shared_ptr pointer; typedef pointer const& ref; - /** Create from a vector of data. - - @note A copy of the data is created. - */ +private: + // This hack is used to make the constructor effectively private + // except for when we use it in the call to make_shared. + // There's no portable way to make make_shared<> a friend work. 
+ struct PrivateAccess { }; +public: + // This constructor is private, use createObject instead. NodeObject (NodeObjectType type, - LedgerIndex ledgerIndex, - Blob const & binaryDataToCopy, - uint256 const & hash); + LedgerIndex ledgerIndex, + Blob& data, + uint256 const& hash, + PrivateAccess); - /** Create from an area of memory. + /** Create an object from fields. - @note A copy of the data is created. + The caller's variable is modified during this call. The + underlying storage for the Blob is taken over by the NodeObject. + + @param type The type of object. + @param ledgerIndex The ledger in which this object appears. + @param data A buffer containing the payload. The caller's variable + is overwritten. + @param hash The 256-bit hash of the payload data. */ - NodeObject (NodeObjectType type, - LedgerIndex ledgerIndex, - void const * bufferToCopy, - int bytesInBuffer, - uint256 const & hash); + static Ptr createObject (NodeObjectType type, + LedgerIndex ledgerIndex, + Blob& data, + uint256 const& hash); /** Retrieve the type of this object. */ @@ -73,11 +106,30 @@ public: */ Blob const& getData () const; + /** See if this object has the same data as another object. + */ + bool isCloneOf (NodeObject::Ptr const& other) const; + + /** Binary function that satisfies the strict-weak-ordering requirement. + + This compares the hashes of both objects and returns true if + the first hash is considered to go before the second. 
+ + @see std::sort + */ + struct LessThan + { + inline bool operator() (NodeObject::Ptr const& lhs, NodeObject::Ptr const& rhs) const noexcept + { + return lhs->getHash () < rhs->getHash (); + } + }; + private: - NodeObjectType const mType; - uint256 const mHash; - LedgerIndex const mLedgerIndex; - Blob const mData; + NodeObjectType mType; + uint256 mHash; + LedgerIndex mLedgerIndex; + Blob mData; }; #endif diff --git a/modules/ripple_app/node/ripple_NodeStore.cpp b/modules/ripple_app/node/ripple_NodeStore.cpp index 960e0d805f..b0ddd751d7 100644 --- a/modules/ripple_app/node/ripple_NodeStore.cpp +++ b/modules/ripple_app/node/ripple_NodeStore.cpp @@ -4,211 +4,164 @@ */ //============================================================================== -Array NodeStore::s_factories; - -NodeStore::NodeStore (String backendParameters, String fastBackendParameters, int cacheSize, int cacheAge) - : m_backend (createBackend (backendParameters)) - , mCache ("NodeStore", cacheSize, cacheAge) - , mNegativeCache ("HashedObjectNegativeCache", 0, 120) +NodeStore::DecodedBlob::DecodedBlob (void const* key, void const* value, int valueBytes) { - if (fastBackendParameters.isNotEmpty ()) - m_fastBackend = createBackend (fastBackendParameters); -} + /* Data format: -void NodeStore::addBackendFactory (BackendFactory& factory) -{ - s_factories.add (&factory); -} + Bytes -float NodeStore::getCacheHitRate () -{ - return mCache.getHitRate (); -} + 0...3 LedgerIndex 32-bit big endian integer + 4...7 Unused? 
An unused copy of the LedgerIndex + 8 char One of NodeObjectType + 9...end The body of the object data + */ -void NodeStore::tune (int size, int age) -{ - mCache.setTargetSize (size); - mCache.setTargetAge (age); -} + m_success = false; + m_key = key; + // VFALCO NOTE Ledger indexes should have started at 1 + m_ledgerIndex = LedgerIndex (-1); + m_objectType = hotUNKNOWN; + m_objectData = nullptr; + m_dataBytes = bmax (0, valueBytes - 9); -void NodeStore::sweep () -{ - mCache.sweep (); - mNegativeCache.sweep (); -} - -void NodeStore::waitWrite () -{ - m_backend->waitWrite (); - if (m_fastBackend) - m_fastBackend->waitWrite (); -} - -int NodeStore::getWriteLoad () -{ - return m_backend->getWriteLoad (); -} - -bool NodeStore::store (NodeObjectType type, uint32 index, - Blob const& data, uint256 const& hash) -{ - // return: false = already in cache, true = added to cache - if (mCache.touch (hash)) - return false; - -#ifdef PARANOID - assert (hash == Serializer::getSHA512Half (data)); -#endif - - NodeObject::pointer object = boost::make_shared (type, index, data, hash); - - if (!mCache.canonicalize (hash, object)) + if (valueBytes > 4) { - m_backend->store (object); - if (m_fastBackend) - m_fastBackend->store (object); + LedgerIndex const* index = static_cast (value); + m_ledgerIndex = ByteOrder::swapIfLittleEndian (*index); } - mNegativeCache.del (hash); - return true; -} + // VFALCO NOTE What about bytes 4 through 7 inclusive? 
-NodeObject::pointer NodeStore::retrieve (uint256 const& hash) -{ - NodeObject::pointer obj = mCache.fetch (hash); - - if (obj || mNegativeCache.isPresent (hash)) - return obj; - - if (m_fastBackend) + if (valueBytes > 8) { - obj = m_fastBackend->retrieve (hash); + unsigned char const* byte = static_cast (value); + m_objectType = static_cast (byte [8]); + } - if (obj) + if (valueBytes > 9) + { + m_objectData = static_cast (value) + 9; + + switch (m_objectType) { - mCache.canonicalize (hash, obj); - return obj; + case hotUNKNOWN: + default: + break; + + case hotLEDGER: + case hotTRANSACTION: + case hotACCOUNT_NODE: + case hotTRANSACTION_NODE: + m_success = true; + break; } } - - { - LoadEvent::autoptr event (getApp().getJobQueue ().getLoadEventAP (jtHO_READ, "HOS::retrieve")); - obj = m_backend->retrieve(hash); - - if (!obj) - { - mNegativeCache.add (hash); - return obj; - } - } - - mCache.canonicalize (hash, obj); - - if (m_fastBackend) - m_fastBackend->store(obj); - - WriteLog (lsTRACE, NodeObject) << "HOS: " << hash << " fetch: in db"; - return obj; } -void NodeStore::importVisitor ( - std::vector & objects, - NodeObject::pointer object) +NodeObject::Ptr NodeStore::DecodedBlob::createObject () { - if (objects.size() >= 128) - { - m_backend->bulkStore (objects); + bassert (m_success); - objects.clear (); - objects.reserve (128); + NodeObject::Ptr object; + + if (m_success) + { + Blob data (m_dataBytes); + + memcpy (data.data (), m_objectData, m_dataBytes); + + object = NodeObject::createObject ( + m_objectType, m_ledgerIndex, data, uint256 (m_key)); } - objects.push_back (object); + return object; } -int NodeStore::import (String sourceBackendParameters) +//------------------------------------------------------------------------------ + +void NodeStore::EncodedBlob::prepare (NodeObject::Ptr const& object) { - ScopedPointer srcBackend (createBackend (sourceBackendParameters)); + m_key = object->getHash ().begin (); - WriteLog (lsWARNING, NodeObject) << - "Node 
import from '" << srcBackend->getDataBaseName() << "' to '" - << m_backend->getDataBaseName() << "'."; + // This is how many bytes we need in the flat data + m_size = object->getData ().size () + 9; - std::vector objects; + m_data.ensureSize (m_size); - objects.reserve (128); + // These sizes must be the same! + static_bassert (sizeof (uint32) == sizeof (object->getIndex ())); - srcBackend->visitAll (BIND_TYPE (&NodeStore::importVisitor, this, boost::ref (objects), P_1)); - - if (!objects.empty ()) - m_backend->bulkStore (objects); - - return 0; -} - -NodeStore::Backend* NodeStore::createBackend (String const& parameters) -{ - Backend* backend = nullptr; - - StringPairArray keyValues = parseKeyValueParameters (parameters, '|'); - - String const& type = keyValues ["type"]; - - if (type.isNotEmpty ()) { - BackendFactory* factory = nullptr; + uint32* buf = static_cast (m_data.getData ()); - for (int i = 0; i < s_factories.size (); ++i) - { - if (s_factories [i]->getName () == type) - { - factory = s_factories [i]; - break; - } - } - - if (factory != nullptr) - { - backend = factory->createInstance (keyValues); - } - else - { - throw std::runtime_error ("unkown backend type"); - } - } - else - { - throw std::runtime_error ("missing backend type"); + buf [0] = ByteOrder::swapIfLittleEndian (object->getIndex ()); + buf [1] = ByteOrder::swapIfLittleEndian (object->getIndex ()); } - return backend; + { + unsigned char* buf = static_cast (m_data.getData ()); + + buf [8] = static_cast (object->getType ()); + + memcpy (&buf [9], object->getData ().data (), object->getData ().size ()); + } } -bool NodeStore::Backend::store (NodeObject::ref object) +//============================================================================== + +NodeStore::BatchWriter::BatchWriter (Callback& callback, Scheduler& scheduler) + : m_callback (callback) + , m_scheduler (scheduler) + , mWriteGeneration (0) + , mWriteLoad (0) + , mWritePending (false) { - boost::mutex::scoped_lock sl (mWriteMutex); 
+ mWriteSet.reserve (batchWritePreallocationSize); +} + +NodeStore::BatchWriter::~BatchWriter () +{ + waitForWriting (); +} + +void NodeStore::BatchWriter::store (NodeObject::ref object) +{ + LockType::scoped_lock sl (mWriteMutex); + mWriteSet.push_back (object); - if (!mWritePending) + if (! mWritePending) { mWritePending = true; - getApp().getJobQueue ().addJob (jtWRITE, "NodeObject::store", - BIND_TYPE (&NodeStore::Backend::bulkWrite, this, P_1)); + + m_scheduler.scheduleTask (this); } - return true; } -void NodeStore::Backend::bulkWrite (Job &) +int NodeStore::BatchWriter::getWriteLoad () +{ + LockType::scoped_lock sl (mWriteMutex); + + return std::max (mWriteLoad, static_cast (mWriteSet.size ())); +} + +void NodeStore::BatchWriter::performScheduledTask () +{ + writeBatch (); +} + +void NodeStore::BatchWriter::writeBatch () { int setSize = 0; - while (1) + for (;;) { std::vector< boost::shared_ptr > set; - set.reserve (128); + + set.reserve (batchWritePreallocationSize); { - boost::mutex::scoped_lock sl (mWriteMutex); + LockType::scoped_lock sl (mWriteMutex); mWriteSet.swap (set); assert (mWriteSet.empty ()); @@ -219,29 +172,1006 @@ void NodeStore::Backend::bulkWrite (Job &) { mWritePending = false; mWriteLoad = 0; + + // VFALCO NOTE Fix this function to not return from the middle return; } + // VFALCO NOTE On the first trip through, mWriteLoad will be 0. + // This is probably not intended. 
Perhaps the order + // of calls isn't quite right + // mWriteLoad = std::max (setSize, static_cast (mWriteSet.size ())); + setSize = set.size (); } - bulkStore (set); + m_callback.writeBatch (set); } } -void NodeStore::Backend::waitWrite () +void NodeStore::BatchWriter::waitForWriting () { - boost::mutex::scoped_lock sl (mWriteMutex); + LockType::scoped_lock sl (mWriteMutex); int gen = mWriteGeneration; while (mWritePending && (mWriteGeneration == gen)) mWriteCondition.wait (sl); } -int NodeStore::Backend::getWriteLoad () -{ - boost::mutex::scoped_lock sl (mWriteMutex); +//============================================================================== - return std::max (mWriteLoad, static_cast (mWriteSet.size ())); +class NodeStoreImp + : public NodeStore + , LeakChecked +{ +public: + NodeStoreImp (Parameters const& backendParameters, + Parameters const& fastBackendParameters, + Scheduler& scheduler) + : m_scheduler (scheduler) + , m_backend (createBackend (backendParameters, scheduler)) + , m_fastBackend ((fastBackendParameters.size () > 0) + ? createBackend (fastBackendParameters, scheduler) : nullptr) + , m_cache ("NodeStore", 16384, 300) + , m_negativeCache ("NoteStoreNegativeCache", 0, 120) + { + } + + ~NodeStoreImp () + { + } + + String getName () const + { + return m_backend->getName (); + } + + //------------------------------------------------------------------------------ + + NodeObject::Ptr fetch (uint256 const& hash) + { + // See if the object already exists in the cache + // + NodeObject::Ptr obj = m_cache.fetch (hash); + + if (obj == nullptr) + { + // It's not in the cache, see if we can skip checking the db. + // + if (! m_negativeCache.isPresent (hash)) + { + // There's still a chance it could be in one of the databases. 
+ + bool foundInFastBackend = false; + + // Check the fast backend database if we have one + // + if (m_fastBackend != nullptr) + { + obj = fetchInternal (m_fastBackend, hash); + + // If we found the object, avoid storing it again later. + if (obj != nullptr) + foundInFastBackend = true; + } + + // Are we still without an object? + // + if (obj == nullptr) + { + // Yes so at last we will try the main database. + // + { + // Monitor this operation's load since it is expensive. + // + // VFALCO TODO Why is this an autoptr? Why can't it just be a plain old object? + // + // VFALCO NOTE Commented this out because it breaks the unit test! + // + //LoadEvent::autoptr event (getApp().getJobQueue ().getLoadEventAP (jtHO_READ, "HOS::retrieve")); + + obj = fetchInternal (m_backend, hash); + } + + // If it's not in the main database, remember that so we + // can skip the lookup for the same object again later. + // + if (obj == nullptr) + m_negativeCache.add (hash); + } + + // Did we finally get something? + // + if (obj != nullptr) + { + // Yes it so canonicalize. This solves the problem where + // more than one thread has its own copy of the same object. + // + m_cache.canonicalize (hash, obj); + + if (! foundInFastBackend) + { + // If we have a fast back end, store it there for later. + // + if (m_fastBackend != nullptr) + m_fastBackend->store (obj); + + // Since this was a 'hard' fetch, we will log it. + // + WriteLog (lsTRACE, NodeObject) << "HOS: " << hash << " fetch: in db"; + } + } + } + else + { + // hash is known not to be in the database + } + } + else + { + // found it! + } + + return obj; + } + + NodeObject::Ptr fetchInternal (Backend* backend, uint256 const& hash) + { + NodeObject::Ptr object; + + Backend::Status const status = backend->fetch (hash.begin (), &object); + + switch (status) + { + case Backend::ok: + case Backend::notFound: + break; + + case Backend::dataCorrupt: + // VFALCO TODO Deal with encountering corrupt data! 
+ // + WriteLog (lsFATAL, NodeObject) << "Corrupt NodeObject #" << hash; + break; + + default: + WriteLog (lsWARNING, NodeObject) << "Unknown status=" << status; + break; + } + + return object; + } + + //------------------------------------------------------------------------------ + + void store (NodeObjectType type, + uint32 index, + Blob& data, + uint256 const& hash) + { + bool const keyFoundAndObjectCached = m_cache.refreshIfPresent (hash); + + // VFALCO NOTE What happens if the key is found, but the object + // fell out of the cache? We will end up passing it + // to the backend anyway. + // + if (! keyFoundAndObjectCached) + { + #if RIPPLE_VERIFY_NODEOBJECT_KEYS + assert (hash == Serializer::getSHA512Half (data)); + #endif + + NodeObject::Ptr object = NodeObject::createObject ( + type, index, data, hash); + + if (!m_cache.canonicalize (hash, object)) + { + m_backend->store (object); + + if (m_fastBackend) + m_fastBackend->store (object); + } + + m_negativeCache.del (hash); + } + } + + //------------------------------------------------------------------------------ + + float getCacheHitRate () + { + return m_cache.getHitRate (); + } + + void tune (int size, int age) + { + m_cache.setTargetSize (size); + m_cache.setTargetAge (age); + } + + void sweep () + { + m_cache.sweep (); + m_negativeCache.sweep (); + } + + int getWriteLoad () + { + return m_backend->getWriteLoad (); + } + + //------------------------------------------------------------------------------ + + void visitAll (Backend::VisitCallback& callback) + { + m_backend->visitAll (callback); + } + + void import (NodeStore& sourceDatabase) + { + class ImportVisitCallback : public Backend::VisitCallback + { + public: + explicit ImportVisitCallback (Backend& backend) + : m_backend (backend) + { + m_objects.reserve (batchWritePreallocationSize); + } + + ~ImportVisitCallback () + { + if (! 
m_objects.empty ()) + m_backend.storeBatch (m_objects); + } + + void visitObject (NodeObject::Ptr const& object) + { + if (m_objects.size () >= batchWritePreallocationSize) + { + m_backend.storeBatch (m_objects); + + m_objects.clear (); + m_objects.reserve (batchWritePreallocationSize); + } + + m_objects.push_back (object); + } + + private: + Backend& m_backend; + Batch m_objects; + }; + + //-------------------------------------------------------------------------- + + ImportVisitCallback callback (*m_backend); + + sourceDatabase.visitAll (callback); + } + + //------------------------------------------------------------------------------ + + static NodeStore::Backend* createBackend ( + Parameters const& parameters, Scheduler& scheduler = getSynchronousScheduler ()) + { + Backend* backend = nullptr; + + String const& type = parameters ["type"]; + + if (type.isNotEmpty ()) + { + BackendFactory* factory = nullptr; + + for (int i = 0; i < s_factories.size (); ++i) + { + if (s_factories [i]->getName ().compareIgnoreCase (type) == 0) + { + factory = s_factories [i]; + break; + } + } + + if (factory != nullptr) + { + backend = factory->createInstance (NodeObject::keyBytes, parameters, scheduler); + } + else + { + Throw (std::runtime_error ("unknown backend type")); + } + } + else + { + Throw (std::runtime_error ("missing backend type")); + } + + return backend; + } + + static void addBackendFactory (BackendFactory& factory) + { + s_factories.add (&factory); + } + + //------------------------------------------------------------------------------ + +private: + static Array s_factories; + + Scheduler& m_scheduler; + + // Persistent key/value storage. + ScopedPointer m_backend; + + // Larger key/value storage, but not necessarily persistent. + ScopedPointer m_fastBackend; + + // VFALCO NOTE What are these things for? We need comments. 
+ TaggedCache m_cache; + KeyCache m_negativeCache; +}; + +Array NodeStoreImp::s_factories; + +//------------------------------------------------------------------------------ + +void NodeStore::addBackendFactory (BackendFactory& factory) +{ + NodeStoreImp::addBackendFactory (factory); } + +NodeStore::Scheduler& NodeStore::getSynchronousScheduler () +{ + // Simple scheduler that performs the task immediately + struct SynchronousScheduler : Scheduler + { + void scheduleTask (Task* task) + { + task->performScheduledTask (); + } + }; + + static SynchronousScheduler scheduler; + + return scheduler; +} + +NodeStore* NodeStore::New (Parameters const& backendParameters, + Parameters fastBackendParameters, + Scheduler& scheduler) +{ + return new NodeStoreImp (backendParameters, + fastBackendParameters, + scheduler); +} + +//============================================================================== + +// Some common code for the unit tests +// +class NodeStoreUnitTest : public UnitTest +{ +public: + // Tunable parameters + // + enum + { + maxPayloadBytes = 1000, + numObjectsToTest = 1000 + }; + + // Shorthand type names + // + typedef NodeStore::Backend Backend; + typedef NodeStore::Batch Batch; + + // Creates predictable objects + class PredictableObjectFactory + { + public: + explicit PredictableObjectFactory (int64 seedValue) + : m_seedValue (seedValue) + { + } + + NodeObject::Ptr createObject (int index) + { + Random r (m_seedValue + index); + + NodeObjectType type; + switch (r.nextInt (4)) + { + case 0: type = hotLEDGER; break; + case 1: type = hotTRANSACTION; break; + case 2: type = hotACCOUNT_NODE; break; + case 3: type = hotTRANSACTION_NODE; break; + default: + type = hotUNKNOWN; + break; + }; + + LedgerIndex ledgerIndex = 1 + r.nextInt (1024 * 1024); + + uint256 hash; + r.nextBlob (hash.begin (), hash.size ()); + + int const payloadBytes = 1 + r.nextInt (maxPayloadBytes); + + Blob data (payloadBytes); + + r.nextBlob (data.data (), payloadBytes); + + return 
NodeObject::createObject (type, ledgerIndex, data, hash); + } + + private: + int64 const m_seedValue; + }; + +public: + NodeStoreUnitTest (String name, UnitTest::When when = UnitTest::runAlways) + : UnitTest (name, "ripple", when) + { + } + + // Create a predictable batch of objects + static void createPredictableBatch (Batch& batch, int startingIndex, int numObjects, int64 seedValue) + { + batch.reserve (numObjects); + + PredictableObjectFactory factory (seedValue); + + for (int i = 0; i < numObjects; ++i) + batch.push_back (factory.createObject (startingIndex + i)); + } + + // Compare two batches for equality + static bool areBatchesEqual (Batch const& lhs, Batch const& rhs) + { + bool result = true; + + if (lhs.size () == rhs.size ()) + { + for (int i = 0; i < lhs.size (); ++i) + { + if (! lhs [i]->isCloneOf (rhs [i])) + { + result = false; + break; + } + } + } + else + { + result = false; + } + + return result; + } + + // Store a batch in a backend + void storeBatch (Backend& backend, Batch const& batch) + { + for (int i = 0; i < batch.size (); ++i) + { + backend.store (batch [i]); + } + } + + // Get a copy of a batch in a backend + void fetchCopyOfBatch (Backend& backend, Batch* pCopy, Batch const& batch) + { + pCopy->clear (); + pCopy->reserve (batch.size ()); + + for (int i = 0; i < batch.size (); ++i) + { + NodeObject::Ptr object; + + Backend::Status const status = backend.fetch ( + batch [i]->getHash ().cbegin (), &object); + + expect (status == Backend::ok, "Should be ok"); + + if (status == Backend::ok) + { + expect (object != nullptr, "Should not be null"); + + pCopy->push_back (object); + } + } + } + + // Store all objects in a batch + static void storeBatch (NodeStore& db, NodeStore::Batch const& batch) + { + for (int i = 0; i < batch.size (); ++i) + { + NodeObject::Ptr const object (batch [i]); + + Blob data (object->getData ()); + + db.store (object->getType (), + object->getIndex (), + data, + object->getHash ()); + } + } + + // Fetch all the 
hashes in one batch, into another batch. + static void fetchCopyOfBatch (NodeStore& db, + NodeStore::Batch* pCopy, + NodeStore::Batch const& batch) + { + pCopy->clear (); + pCopy->reserve (batch.size ()); + + for (int i = 0; i < batch.size (); ++i) + { + NodeObject::Ptr object = db.fetch (batch [i]->getHash ()); + + if (object != nullptr) + pCopy->push_back (object); + } + } +}; + +//------------------------------------------------------------------------------ + +// Tests predictable batches, and NodeObject blob encoding +// +class NodeStoreBasicsTests : public NodeStoreUnitTest +{ +public: + typedef NodeStore::EncodedBlob EncodedBlob; + typedef NodeStore::DecodedBlob DecodedBlob; + + NodeStoreBasicsTests () : NodeStoreUnitTest ("NodeStoreBasics") + { + } + + // Make sure predictable object generation works! + void testBatches (int64 const seedValue) + { + beginTest ("batch"); + + Batch batch1; + createPredictableBatch (batch1, 0, numObjectsToTest, seedValue); + + Batch batch2; + createPredictableBatch (batch2, 0, numObjectsToTest, seedValue); + + expect (areBatchesEqual (batch1, batch2), "Should be equal"); + + Batch batch3; + createPredictableBatch (batch3, 1, numObjectsToTest, seedValue); + + expect (! 
areBatchesEqual (batch1, batch3), "Should not be equal"); + } + + // Checks encoding/decoding blobs + void testBlobs (int64 const seedValue) + { + beginTest ("encoding"); + + Batch batch; + createPredictableBatch (batch, 0, numObjectsToTest, seedValue); + + EncodedBlob encoded; + for (int i = 0; i < batch.size (); ++i) + { + encoded.prepare (batch [i]); + + DecodedBlob decoded (encoded.getKey (), encoded.getData (), encoded.getSize ()); + + expect (decoded.wasOk (), "Should be ok"); + + if (decoded.wasOk ()) + { + NodeObject::Ptr const object (decoded.createObject ()); + + expect (batch [i]->isCloneOf (object), "Should be clones"); + } + } + } + + void runTest () + { + int64 const seedValue = 50; + + testBatches (seedValue); + + testBlobs (seedValue); + } +}; + +static NodeStoreBasicsTests nodeStoreBasicsTests; + +//------------------------------------------------------------------------------ + +// Tests the NodeStore::Backend interface +// +class NodeStoreBackendTests : public NodeStoreUnitTest +{ +public: + NodeStoreBackendTests () : NodeStoreUnitTest ("NodeStoreBackend") + { + } + + //-------------------------------------------------------------------------- + + void testBackend (String type, int64 const seedValue) + { + beginTest (String ("NodeStore::Backend type=") + type); + + StringPairArray params; + File const path (File::createTempFile ("node_db")); + params.set ("type", type); + params.set ("path", path.getFullPathName ()); + + // Create a batch + NodeStore::Batch batch; + createPredictableBatch (batch, 0, numObjectsToTest, seedValue); + + { + // Open the backend + ScopedPointer <Backend> backend (NodeStoreImp::createBackend (params)); + + // Write the batch + storeBatch (*backend, batch); + + { + // Read it back in + NodeStore::Batch copy; + fetchCopyOfBatch (*backend, &copy, batch); + expect (areBatchesEqual (batch, copy), "Should be equal"); + } + + { + // Reorder and read the copy again + NodeStore::Batch copy; + UnitTestUtilities::repeatableShuffle (batch.size
(), batch, seedValue); + fetchCopyOfBatch (*backend, &copy, batch); + expect (areBatchesEqual (batch, copy), "Should be equal"); + } + } + + { + // Re-open the backend + ScopedPointer <Backend> backend (NodeStoreImp::createBackend (params)); + + // Read it back in + NodeStore::Batch copy; + fetchCopyOfBatch (*backend, &copy, batch); + // Canonicalize the source and destination batches + std::sort (batch.begin (), batch.end (), NodeObject::LessThan ()); + std::sort (copy.begin (), copy.end (), NodeObject::LessThan ()); + expect (areBatchesEqual (batch, copy), "Should be equal"); + } + } + + //-------------------------------------------------------------------------- + + void runTest () + { + int const seedValue = 50; + + testBackend ("keyvadb", seedValue); + + testBackend ("leveldb", seedValue); + + testBackend ("sqlite", seedValue); + + #if RIPPLE_HYPERLEVELDB_AVAILABLE + testBackend ("hyperleveldb", seedValue); + #endif + + #if RIPPLE_MDB_AVAILABLE + testBackend ("mdb", seedValue); + #endif + } +}; + +static NodeStoreBackendTests nodeStoreBackendTests; + +//------------------------------------------------------------------------------ + +class NodeStoreTimingTests : public NodeStoreUnitTest +{ +public: + enum + { + numObjectsToTest = 20000 + }; + + NodeStoreTimingTests () + : NodeStoreUnitTest ("NodeStoreTiming", UnitTest::runManual) + { + } + + class Stopwatch + { + public: + Stopwatch () + { + } + + void start () + { + m_startTime = Time::getHighResolutionTicks (); + } + + double getElapsed () + { + int64 const now = Time::getHighResolutionTicks(); + + return Time::highResolutionTicksToSeconds (now - m_startTime); + } + + private: + int64 m_startTime; + }; + + //-------------------------------------------------------------------------- + + void testBackend (String type, int64 const seedValue) + { + String s; + s << "Testing backend '" << type << "' performance"; + beginTest (s); + + StringPairArray params; + File const path (File::createTempFile ("node_db")); + params.set
("type", type); + params.set ("path", path.getFullPathName ()); + + // Create batches + NodeStore::Batch batch1; + createPredictableBatch (batch1, 0, numObjectsToTest, seedValue); + NodeStore::Batch batch2; + createPredictableBatch (batch2, 0, numObjectsToTest, seedValue); + + // Open the backend + ScopedPointer <Backend> backend (NodeStoreImp::createBackend (params)); + + Stopwatch t; + + // Individual write batch test + t.start (); + storeBatch (*backend, batch1); + s = ""; + s << " Single write: " << String (t.getElapsed (), 2) << " seconds"; + logMessage (s); + + // Bulk write batch test + t.start (); + backend->storeBatch (batch2); + s = ""; + s << " Batch write: " << String (t.getElapsed (), 2) << " seconds"; + logMessage (s); + + // Read test + Batch copy; + t.start (); + fetchCopyOfBatch (*backend, &copy, batch1); + fetchCopyOfBatch (*backend, &copy, batch2); + s = ""; + s << " Batch read: " << String (t.getElapsed (), 2) << " seconds"; + logMessage (s); + } + + //-------------------------------------------------------------------------- + + void runTest () + { + int const seedValue = 50; + + testBackend ("keyvadb", seedValue); + + testBackend ("leveldb", seedValue); + + #if RIPPLE_HYPERLEVELDB_AVAILABLE + testBackend ("hyperleveldb", seedValue); + #endif + + #if RIPPLE_MDB_AVAILABLE + testBackend ("mdb", seedValue); + #endif + + testBackend ("sqlite", seedValue); + } +}; + +static NodeStoreTimingTests nodeStoreTimingTests; + +//------------------------------------------------------------------------------ + +class NodeStoreTests : public NodeStoreUnitTest +{ +public: + NodeStoreTests () : NodeStoreUnitTest ("NodeStore") + { + } + + void testImport (String destBackendType, String srcBackendType, int64 seedValue) + { + File const node_db (File::createTempFile ("node_db")); + StringPairArray srcParams; + srcParams.set ("type", srcBackendType); + srcParams.set ("path", node_db.getFullPathName ()); + + // Create a batch + NodeStore::Batch batch; + createPredictableBatch (batch,
0, numObjectsToTest, seedValue); + + // Write to source db + { + ScopedPointer <NodeStore> src (NodeStore::New (srcParams)); + + storeBatch (*src, batch); + } + + NodeStore::Batch copy; + + { + // Re-open the db + ScopedPointer <NodeStore> src (NodeStore::New (srcParams)); + + // Set up the destination database + File const dest_db (File::createTempFile ("dest_db")); + StringPairArray destParams; + destParams.set ("type", destBackendType); + destParams.set ("path", dest_db.getFullPathName ()); + + ScopedPointer <NodeStore> dest (NodeStore::New (destParams)); + + beginTest (String ("import into '") + destBackendType + "' from '" + srcBackendType + "'"); + + // Do the import + dest->import (*src); + + // Get the results of the import + fetchCopyOfBatch (*dest, &copy, batch); + } + + // Canonicalize the source and destination batches + std::sort (batch.begin (), batch.end (), NodeObject::LessThan ()); + std::sort (copy.begin (), copy.end (), NodeObject::LessThan ()); + expect (areBatchesEqual (batch, copy), "Should be equal"); + + } + + //-------------------------------------------------------------------------- + + void testNodeStore (String type, bool const useEphemeralDatabase, int64 const seedValue) + { + String s; + s << String ("NodeStore backend '") + type + "'"; + if (useEphemeralDatabase) + s << " (with ephemeral database)"; + + beginTest (s); + + File const node_db (File::createTempFile ("node_db")); + StringPairArray nodeParams; + nodeParams.set ("type", type); + nodeParams.set ("path", node_db.getFullPathName ()); + + File const temp_db (File::createTempFile ("temp_db")); + StringPairArray tempParams; + if (useEphemeralDatabase) + { + tempParams.set ("type", type); + tempParams.set ("path", temp_db.getFullPathName ()); + } + + // Create a batch + NodeStore::Batch batch; + createPredictableBatch (batch, 0, numObjectsToTest, seedValue); + + { + // Open the database + ScopedPointer <NodeStore> db (NodeStore::New (nodeParams, tempParams)); + + // Write the batch + storeBatch (*db, batch); + + { + // Read it
back in + NodeStore::Batch copy; + fetchCopyOfBatch (*db, &copy, batch); + expect (areBatchesEqual (batch, copy), "Should be equal"); + } + + { + // Reorder and read the copy again + NodeStore::Batch copy; + UnitTestUtilities::repeatableShuffle (batch.size (), batch, seedValue); + fetchCopyOfBatch (*db, &copy, batch); + expect (areBatchesEqual (batch, copy), "Should be equal"); + } + } + + { + // Re-open the database without the ephemeral DB + ScopedPointer <NodeStore> db (NodeStore::New (nodeParams)); + + // Read it back in + NodeStore::Batch copy; + fetchCopyOfBatch (*db, &copy, batch); + + // Canonicalize the source and destination batches + std::sort (batch.begin (), batch.end (), NodeObject::LessThan ()); + std::sort (copy.begin (), copy.end (), NodeObject::LessThan ()); + expect (areBatchesEqual (batch, copy), "Should be equal"); + } + + if (useEphemeralDatabase) + { + // Verify the ephemeral db + ScopedPointer <NodeStore> db (NodeStore::New (tempParams, StringPairArray ())); + + // Read it back in + NodeStore::Batch copy; + fetchCopyOfBatch (*db, &copy, batch); + + // Canonicalize the source and destination batches + std::sort (batch.begin (), batch.end (), NodeObject::LessThan ()); + std::sort (copy.begin (), copy.end (), NodeObject::LessThan ()); + expect (areBatchesEqual (batch, copy), "Should be equal"); + } + } + + //-------------------------------------------------------------------------- + + void runBackendTests (bool useEphemeralDatabase, int64 const seedValue) + { + testNodeStore ("keyvadb", useEphemeralDatabase, seedValue); + + testNodeStore ("leveldb", useEphemeralDatabase, seedValue); + + testNodeStore ("sqlite", useEphemeralDatabase, seedValue); + + #if RIPPLE_HYPERLEVELDB_AVAILABLE + testNodeStore ("hyperleveldb", useEphemeralDatabase, seedValue); + #endif + + #if RIPPLE_MDB_AVAILABLE + testNodeStore ("mdb", useEphemeralDatabase, seedValue); + #endif + } + + //-------------------------------------------------------------------------- + + void runImportTests (int64 const seedValue) + 
{ + //testImport ("keyvadb", "keyvadb", seedValue); + + testImport ("leveldb", "leveldb", seedValue); + + #if RIPPLE_HYPERLEVELDB_AVAILABLE + testImport ("hyperleveldb", "hyperleveldb", seedValue); + #endif + + #if RIPPLE_MDB_AVAILABLE + testImport ("mdb", "mdb", seedValue); + #endif + + testImport ("sqlite", "sqlite", seedValue); + } + + //-------------------------------------------------------------------------- + + void runTest () + { + int64 const seedValue = 50; + + runBackendTests (false, seedValue); + + runBackendTests (true, seedValue); + + runImportTests (seedValue); + } +}; + +static NodeStoreTests nodeStoreTests; diff --git a/modules/ripple_app/node/ripple_NodeStore.h b/modules/ripple_app/node/ripple_NodeStore.h index dc21f4c2f4..a2c26f72df 100644 --- a/modules/ripple_app/node/ripple_NodeStore.h +++ b/modules/ripple_app/node/ripple_NodeStore.h @@ -8,60 +8,282 @@ #define RIPPLE_NODESTORE_H_INCLUDED /** Persistency layer for NodeObject + + A Node is a ledger object which is uniquely identified by a key, which is + the 256-bit hash of the body of the node. The payload is a variable length + block of serialized data. + + All ledger data is stored as node objects and as such, needs to be persisted + between launches. Furthermore, since the set of node objects will in + general be larger than the amount of available memory, purged node objects + which are later accessed must be retrieved from the node store. + + @see NodeObject */ -class NodeStore : LeakChecked +class NodeStore { public: - /** Back end used for the store. + enum + { + // This is only used to pre-allocate the array for + // batch objects and does not affect the amount written. + // + batchWritePreallocationSize = 128 + }; + + typedef std::vector Batch; + + typedef StringPairArray Parameters; + + //-------------------------------------------------------------------------- + + /** Parsed key/value blob into NodeObject components. 
+ + This will extract the information required to construct a NodeObject. It + also does consistency checking and returns the result, so it is possible + to determine if the data is corrupted without throwing an exception. Not + all forms of corruption are detected so further analysis will be needed + to eliminate false negatives. + + @note This defines the database format of a NodeObject! + */ + class DecodedBlob + { + public: + /** Construct the decoded blob from raw data. */ + DecodedBlob (void const* key, void const* value, int valueBytes); + + /** Determine if the decoding was successful. */ + bool wasOk () const noexcept { return m_success; } + + /** Create a NodeObject from this data. */ + NodeObject::Ptr createObject (); + + private: + bool m_success; + + void const* m_key; + LedgerIndex m_ledgerIndex; + NodeObjectType m_objectType; + unsigned char const* m_objectData; + int m_dataBytes; + }; + + //-------------------------------------------------------------------------- + + /** Utility for producing flattened node objects. + + These get recycled to prevent many small allocations. + + @note This defines the database format of a NodeObject! + */ + struct EncodedBlob + { + typedef RecycledObjectPool Pool; + + void prepare (NodeObject::Ptr const& object); + + void const* getKey () const noexcept { return m_key; } + + size_t getSize () const noexcept { return m_size; } + + void const* getData () const noexcept { return m_data.getData (); } + + private: + void const* m_key; + MemoryBlock m_data; + size_t m_size; + }; + + //-------------------------------------------------------------------------- + + /** Provides optional asynchronous scheduling for backends. + + For improved performance, a backend has the option of performing writes + in batches. These writes can be scheduled using the provided scheduler + object. + + @see BatchWriter + */ + class Scheduler + { + public: + /** Derived classes perform scheduled tasks. 
*/ + struct Task + { + virtual ~Task () { } + + /** Performs the task. + + The call may take place on a foreign thread. + */ + virtual void performScheduledTask () = 0; + }; + + /** Schedules a task. + + Depending on the implementation, this could happen + immediately or get deferred. + */ + virtual void scheduleTask (Task* task) = 0; + }; + + //-------------------------------------------------------------------------- + + /** Helps with batch writing. + + The batch writes are performed with a scheduled task. Use of the + class it not required. A backend can implement its own write batching, + or skip write batching if doing so yields a performance benefit. + + @see Scheduler + */ + // VFALCO NOTE I'm not entirely happy having placed this here, + // because whoever needs to use NodeStore certainly doesn't + // need to see the implementation details of BatchWriter. + // + class BatchWriter : private Scheduler::Task + { + public: + /** This callback does the actual writing. */ + struct Callback + { + virtual void writeBatch (Batch const& batch) = 0; + }; + + /** Create a batch writer. */ + BatchWriter (Callback& callback, Scheduler& scheduler); + + /** Destroy a batch writer. + + Anything pending in the batch is written out before this returns. + */ + ~BatchWriter (); + + /** Store the object. + + This will add to the batch and initiate a scheduled task to + write the batch out. + */ + void store (NodeObject::Ptr const& object); + + /** Get an estimate of the amount of writing I/O pending. 
*/ + int getWriteLoad (); + + private: + void performScheduledTask (); + void writeBatch (); + void waitForWriting (); + + private: + typedef boost::recursive_mutex LockType; + typedef boost::condition_variable_any CondvarType; + + Callback& m_callback; + Scheduler& m_scheduler; + LockType mWriteMutex; + CondvarType mWriteCondition; + int mWriteGeneration; + int mWriteLoad; + bool mWritePending; + Batch mWriteSet; + }; + + //-------------------------------------------------------------------------- + + /** A backend used for the store. + + The NodeStore uses a swappable backend so that other database systems + can be tried. Different databases may offer various features such + as improved performance, fault tolerant or distributed storage, or + all in-memory operation. + + A given instance of a backend is fixed to a particular key size. */ class Backend { public: - // VFALCO TODO Move the function definition to the .cpp - Backend () - : mWriteGeneration(0) - , mWriteLoad(0) - , mWritePending(false) + /** Return codes from operations. */ + enum Status { - mWriteSet.reserve(128); - } + ok, + notFound, + dataCorrupt, + unknown + }; + /** Destroy the backend. + + All open files are closed and flushed. If there are batched writes + or other tasks scheduled, they will be completed before this call + returns. + */ virtual ~Backend () { } - virtual std::string getDataBaseName() = 0; + /** Get the human-readable name of this backend. - // Store/retrieve a single object - // These functions must be thread safe - virtual bool store (NodeObject::ref); - virtual NodeObject::pointer retrieve (uint256 const &hash) = 0; + This is used for diagnostic output. + */ + virtual std::string getName() = 0; - // Store a group of objects - // This function will only be called from a single thread - virtual bool bulkStore (const std::vector< NodeObject::pointer >&) = 0; + /** Fetch a single object. 
- // Visit every object in the database - // This function will only be called during an import operation - // - // VFALCO TODO Replace FUNCTION_TYPE with a beast lift. - // - virtual void visitAll (FUNCTION_TYPE ) = 0; + If the object is not found or an error is encountered, the + result will indicate the condition. - // VFALCO TODO Put this bulk writing logic into a separate class. - virtual void bulkWrite (Job &); - virtual void waitWrite (); - virtual int getWriteLoad (); + @note This will be called concurrently. - protected: - // VFALCO TODO Put this bulk writing logic into a separate class. - boost::mutex mWriteMutex; - boost::condition_variable mWriteCondition; - int mWriteGeneration; - int mWriteLoad; - bool mWritePending; - std::vector > mWriteSet; + @param key A pointer to the key data. + @param pObject [out] The created object if successful. + + @return The result of the operation. + */ + virtual Status fetch (void const* key, NodeObject::Ptr* pObject) = 0; + + /** Store a single object. + + Depending on the implementation this may happen immediately + or deferred using a scheduled task. + + @note This will be called concurrently. + + @param object The object to store. + */ + virtual void store (NodeObject::Ptr const& object) = 0; + + /** Store a group of objects. + + @note This function will not be called concurrently with + itself or @ref store. + */ + virtual void storeBatch (Batch const& batch) = 0; + + /** Callback for iterating through objects. + + @see visitAll + */ + struct VisitCallback + { + virtual void visitObject (NodeObject::Ptr const& object) = 0; + }; + + /** Visit every object in the database + + This is usually called during import. + + @note This routine will not be called concurrently with itself + or other methods. + + @see import, VisitCallback + */ + virtual void visitAll (VisitCallback& callback) = 0; + + /** Estimate the number of write operations pending. 
*/ + virtual int getWriteLoad () = 0; }; -public: + //-------------------------------------------------------------------------- + /** Factory to produce backends. */ class BackendFactory @@ -69,67 +291,142 @@ public: public: virtual ~BackendFactory () { } - /** Retrieve the name of this factory. - */ + /** Retrieve the name of this factory. */ virtual String getName () const = 0; /** Create an instance of this factory's backend. + + @param keyBytes The fixed number of bytes per key. + @param keyValues A set of key/value configuration pairs. + @param scheduler The scheduler to use for running tasks. + + @return A pointer to the Backend object. */ - virtual Backend* createInstance (StringPairArray const& keyValues) = 0; + virtual Backend* createInstance (size_t keyBytes, + Parameters const& parameters, + Scheduler& scheduler) = 0; }; -public: + //-------------------------------------------------------------------------- + /** Construct a node store. - parameters has the format: + The parameters are key value pairs passed to the backend. The + 'type' key must exist, it defines the choice of backend. Most + backends also require a 'path' field. + + Some choices for 'type' are: + HyperLevelDB, LevelDB, SQLite, KeyvaDB, MDB - =['|'=] + If the fastBackendParameter is omitted or empty, no ephemeral database + is used. If the scheduler parameter is omited or unspecified, a + synchronous scheduler is used which performs all tasks immediately on + the caller's thread. - The key "type" must exist, it defines the backend. For example - "type=LevelDB|path=/mnt/ephemeral" + @note If the database cannot be opened or created, an exception is thrown. + + @param backendParameters The parameter string for the persistent backend. + @param fastBackendParameters [optional] The parameter string for the ephemeral backend. + @param scheduler [optional The scheduler to use for performing asynchronous tasks. + + @return The opened database. */ - // VFALCO NOTE Is cacheSize in bytes? objects? 
KB? - // Is cacheAge in minutes? seconds? - // - NodeStore (String backendParameters, - String fastBackendParameters, - int cacheSize, - int cacheAge); + static NodeStore* New (Parameters const& backendParameters, + Parameters fastBackendParameters = Parameters (), + Scheduler& scheduler = getSynchronousScheduler ()); + + /** Get the synchronous scheduler. + + The synchronous scheduler performs all tasks immediately, before + returning to the caller, using the caller's thread. + */ + static Scheduler& getSynchronousScheduler (); + + /** Destroy the node store. + + All pending operations are completed, pending writes flushed, + and files closed before this returns. + */ + virtual ~NodeStore () { } + + /** Retrieve the name associated with this backend. + + This is used for diagnostics and may not reflect the actual path + or paths used by the underlying backend. + */ + virtual String getName () const = 0; /** Add the specified backend factory to the list of available factories. The names of available factories are compared against the "type" value in the parameter list on construction. + + @param factory The factory to add. */ static void addBackendFactory (BackendFactory& factory); - float getCacheHitRate (); + /** Fetch an object. - bool store (NodeObjectType type, uint32 index, Blob const& data, - uint256 const& hash); + If the object is known to be not in the database, isn't found in the + database during the fetch, or failed to load correctly during the fetch, + `nullptr` is returned. - NodeObject::pointer retrieve (uint256 const& hash); + @note This can be called concurrently. - void waitWrite (); - void tune (int size, int age); - void sweep (); - int getWriteLoad (); + @param hash The key of the object to retrieve. - int import (String sourceBackendParameters); + @return The object, or nullptr if it couldn't be retrieved. 
+ */ + virtual NodeObject::pointer fetch (uint256 const& hash) = 0; -private: - void importVisitor (std::vector & objects, NodeObject::pointer object); - - static Backend* createBackend (String const& parameters); + /** Store the object. - static Array s_factories; + The caller's Blob parameter is overwritten. -private: - ScopedPointer m_backend; - ScopedPointer m_fastBackend; + @param type The type of object. + @param ledgerIndex The ledger in which the object appears. + @param data The payload of the object. The caller's + variable is overwritten. + @param hash The 256-bit hash of the payload data. + + @return `true` if the object was stored? + */ + virtual void store (NodeObjectType type, + uint32 ledgerIndex, + Blob& data, + uint256 const& hash) = 0; + + /** Visit every object in the database + + This is usually called during import. + + @note This routine will not be called concurrently with itself + or other methods. + + @see import + */ + virtual void visitAll (Backend::VisitCallback& callback) = 0; + + /** Import objects from another database. */ + virtual void import (NodeStore& sourceDatabase) = 0; + + + /** Retrieve the estimated number of pending write operations. + + This is used for diagnostics. + */ + virtual int getWriteLoad () = 0; + + // VFALCO TODO Document this. + virtual float getCacheHitRate () = 0; + + // VFALCO TODO Document this. + // TODO Document the parameter meanings. + virtual void tune (int size, int age) = 0; + + // VFALCO TODO Document this. 
+ virtual void sweep () = 0; - TaggedCache mCache; - KeyCache mNegativeCache; }; #endif diff --git a/modules/ripple_app/node/ripple_NullBackendFactory.cpp b/modules/ripple_app/node/ripple_NullBackendFactory.cpp index 6ffb0d8299..6a3b000c75 100644 --- a/modules/ripple_app/node/ripple_NullBackendFactory.cpp +++ b/modules/ripple_app/node/ripple_NullBackendFactory.cpp @@ -15,28 +15,31 @@ public: { } - std::string getDataBaseName() + std::string getName() { return std::string (); } - bool store (NodeObject::ref obj) + Status fetch (void const*, NodeObject::Ptr*) + { + return notFound; + } + + void store (NodeObject::ref object) + { + } + + void storeBatch (NodeStore::Batch const& batch) { - return false; } - bool bulkStore (const std::vector< NodeObject::pointer >& objs) + void visitAll (VisitCallback& callback) { - return false; } - NodeObject::pointer retrieve (uint256 const& hash) - { - return NodeObject::pointer (); - } - - void visitAll (FUNCTION_TYPE func) + int getWriteLoad () { + return 0; } }; @@ -62,7 +65,10 @@ String NullBackendFactory::getName () const return "none"; } -NodeStore::Backend* NullBackendFactory::createInstance (StringPairArray const& keyValues) +NodeStore::Backend* NullBackendFactory::createInstance ( + size_t, + StringPairArray const&, + NodeStore::Scheduler&) { return new NullBackendFactory::Backend; } diff --git a/modules/ripple_app/node/ripple_NullBackendFactory.h b/modules/ripple_app/node/ripple_NullBackendFactory.h index 7112473384..a68c1838ea 100644 --- a/modules/ripple_app/node/ripple_NullBackendFactory.h +++ b/modules/ripple_app/node/ripple_NullBackendFactory.h @@ -23,7 +23,10 @@ public: static NullBackendFactory& getInstance (); String getName () const; - NodeStore::Backend* createInstance (StringPairArray const& keyValues); + + NodeStore::Backend* createInstance (size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler); }; #endif diff --git a/modules/ripple_app/node/ripple_SqliteBackendFactory.cpp 
b/modules/ripple_app/node/ripple_SqliteBackendFactory.cpp index 0b421ac5be..7b4a7a9dc4 100644 --- a/modules/ripple_app/node/ripple_SqliteBackendFactory.cpp +++ b/modules/ripple_app/node/ripple_SqliteBackendFactory.cpp @@ -4,97 +4,177 @@ */ //============================================================================== +static const char* s_nodeStoreDBInit [] = +{ + "PRAGMA synchronous=NORMAL;", + "PRAGMA journal_mode=WAL;", + "PRAGMA journal_size_limit=1582080;", + +#if (ULONG_MAX > UINT_MAX) && !defined (NO_SQLITE_MMAP) + "PRAGMA mmap_size=171798691840;", +#endif + + "BEGIN TRANSACTION;", + + "CREATE TABLE CommittedObjects ( \ + Hash CHARACTER(64) PRIMARY KEY, \ + ObjType CHAR(1) NOT NULL, \ + LedgerIndex BIGINT UNSIGNED, \ + Object BLOB \ + );", + + "END TRANSACTION;" +}; + +static int s_nodeStoreDBCount = NUMBER (s_nodeStoreDBInit); + +//------------------------------------------------------------------------------ + class SqliteBackendFactory::Backend : public NodeStore::Backend { public: - Backend(std::string const& path) : mName(path) + Backend (size_t keyBytes, std::string const& path) + : m_keyBytes (keyBytes) + , m_name (path) + , m_db (new DatabaseCon(path, s_nodeStoreDBInit, s_nodeStoreDBCount)) { - mDb = new DatabaseCon(path, HashNodeDBInit, HashNodeDBCount); - mDb->getDB()->executeSQL(boost::str(boost::format("PRAGMA cache_size=-%d;") % - (theConfig.getSize(siHashNodeDBCache) * 1024))); + String s; + + // VFALCO TODO Remove this dependency on theConfig + // + s << "PRAGMA cache_size=-" << String (theConfig.getSize(siHashNodeDBCache) * 1024); + m_db->getDB()->executeSQL (s.toStdString ().c_str ()); } - Backend() + ~Backend() { - delete mDb; } - std::string getDataBaseName() + std::string getName() { - return mName; + return m_name; } - bool bulkStore(const std::vector< NodeObject::pointer >& objects) + //-------------------------------------------------------------------------- + + Status fetch (void const* key, NodeObject::Ptr* pObject) { - ScopedLock 
sl(mDb->getDBLock()); - static SqliteStatement pStB(mDb->getDB()->getSqliteDB(), "BEGIN TRANSACTION;"); - static SqliteStatement pStE(mDb->getDB()->getSqliteDB(), "END TRANSACTION;"); - static SqliteStatement pSt(mDb->getDB()->getSqliteDB(), + Status result = ok; + + pObject->reset (); + + { + ScopedLock sl (m_db->getDBLock()); + + uint256 const hash (key); + + static SqliteStatement pSt (m_db->getDB()->getSqliteDB(), + "SELECT ObjType,LedgerIndex,Object FROM CommittedObjects WHERE Hash = ?;"); + + pSt.bind (1, hash.GetHex()); + + if (pSt.isRow (pSt.step())) + { + // VFALCO NOTE This is unfortunately needed, + // the DatabaseCon creates the blob? + Blob data (pSt.getBlob (2)); + *pObject = NodeObject::createObject ( + getTypeFromString (pSt.peekString (0)), + pSt.getUInt32 (1), + data, + hash); + } + else + { + result = notFound; + } + + pSt.reset(); + } + + return result; + } + + void store (NodeObject::ref object) + { + NodeStore::Batch batch; + + batch.push_back (object); + + storeBatch (batch); + } + + void storeBatch (NodeStore::Batch const& batch) + { + // VFALCO TODO Rewrite this to use Beast::db + + ScopedLock sl (m_db->getDBLock()); + + static SqliteStatement pStB (m_db->getDB()->getSqliteDB(), "BEGIN TRANSACTION;"); + static SqliteStatement pStE (m_db->getDB()->getSqliteDB(), "END TRANSACTION;"); + static SqliteStatement pSt (m_db->getDB()->getSqliteDB(), "INSERT OR IGNORE INTO CommittedObjects " "(Hash,ObjType,LedgerIndex,Object) VALUES (?, ?, ?, ?);"); pStB.step(); pStB.reset(); - BOOST_FOREACH(NodeObject::ref object, objects) + BOOST_FOREACH (NodeObject::Ptr const& object, batch) { - bind(pSt, object); + doBind (pSt, object); + pSt.step(); pSt.reset(); } pStE.step(); pStE.reset(); - - return true; - } - NodeObject::pointer retrieve(uint256 const& hash) + void visitAll (VisitCallback& callback) { - NodeObject::pointer ret; + // No lock needed as per the visitAll() API - { - ScopedLock sl(mDb->getDBLock()); - static SqliteStatement 
pSt(mDb->getDB()->getSqliteDB(), - "SELECT ObjType,LedgerIndex,Object FROM CommittedObjects WHERE Hash = ?;"); - - pSt.bind(1, hash.GetHex()); - - if (pSt.isRow(pSt.step())) - ret = boost::make_shared(getType(pSt.peekString(0)), pSt.getUInt32(1), pSt.getBlob(2), hash); - - pSt.reset(); - } - - return ret; - } - - void visitAll(FUNCTION_TYPE func) - { uint256 hash; - static SqliteStatement pSt(mDb->getDB()->getSqliteDB(), + static SqliteStatement pSt(m_db->getDB()->getSqliteDB(), "SELECT ObjType,LedgerIndex,Object,Hash FROM CommittedObjects;"); - while (pSt.isRow(pSt.step())) + while (pSt.isRow (pSt.step())) { hash.SetHexExact(pSt.getString(3)); - func(boost::make_shared(getType(pSt.peekString(0)), pSt.getUInt32(1), pSt.getBlob(2), hash)); + + // VFALCO NOTE This is unfortunately needed, + // the DatabaseCon creates the blob? + Blob data (pSt.getBlob (2)); + NodeObject::Ptr const object (NodeObject::createObject ( + getTypeFromString (pSt.peekString (0)), + pSt.getUInt32 (1), + data, + hash)); + + callback.visitObject (object); } - pSt.reset(); + pSt.reset (); } - void bind(SqliteStatement& statement, NodeObject::ref object) + int getWriteLoad () + { + return 0; + } + + //-------------------------------------------------------------------------- + + void doBind (SqliteStatement& statement, NodeObject::ref object) { char const* type; switch (object->getType()) { - case hotLEDGER: type = "L"; break; + case hotLEDGER: type = "L"; break; case hotTRANSACTION: type = "T"; break; - case hotACCOUNT_NODE: type = "A"; break; - case hotTRANSACTION_NODE: type = "N"; break; + case hotACCOUNT_NODE: type = "A"; break; + case hotTRANSACTION_NODE: type = "N"; break; default: type = "U"; } @@ -104,25 +184,27 @@ public: statement.bindStatic(4, object->getData()); } - NodeObjectType getType(std::string const& type) + NodeObjectType getTypeFromString (std::string const& s) { - NodeObjectType htype = hotUNKNOWN; - if (!type.empty()) + NodeObjectType type = hotUNKNOWN; + + if (!s.empty 
()) { - switch (type[0]) + switch (s [0]) { - case 'L': htype = hotLEDGER; break; - case 'T': htype = hotTRANSACTION; break; - case 'A': htype = hotACCOUNT_NODE; break; - case 'N': htype = hotTRANSACTION_NODE; break; + case 'L': type = hotLEDGER; break; + case 'T': type = hotTRANSACTION; break; + case 'A': type = hotACCOUNT_NODE; break; + case 'N': type = hotTRANSACTION_NODE; break; } } - return htype; + return type; } private: - std::string mName; - DatabaseCon* mDb; + size_t const m_keyBytes; + std::string const m_name; + ScopedPointer m_db; }; //------------------------------------------------------------------------------ @@ -147,7 +229,10 @@ String SqliteBackendFactory::getName () const return "Sqlite"; } -NodeStore::Backend* SqliteBackendFactory::createInstance (StringPairArray const& keyValues) +NodeStore::Backend* SqliteBackendFactory::createInstance ( + size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler) { - return new Backend (keyValues ["path"].toStdString ()); + return new Backend (keyBytes, keyValues ["path"].toStdString ()); } diff --git a/modules/ripple_app/node/ripple_SqliteBackendFactory.h b/modules/ripple_app/node/ripple_SqliteBackendFactory.h index e6420cbde2..828588fd74 100644 --- a/modules/ripple_app/node/ripple_SqliteBackendFactory.h +++ b/modules/ripple_app/node/ripple_SqliteBackendFactory.h @@ -21,7 +21,10 @@ public: static SqliteBackendFactory& getInstance (); String getName () const; - NodeStore::Backend* createInstance (StringPairArray const& keyValues); + + NodeStore::Backend* createInstance (size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler); }; #endif diff --git a/modules/ripple_app/ripple_app.cpp b/modules/ripple_app/ripple_app.cpp index afd567e708..ae84625c3b 100644 --- a/modules/ripple_app/ripple_app.cpp +++ b/modules/ripple_app/ripple_app.cpp @@ -65,6 +65,8 @@ #include "../ripple_core/ripple_core.h" +#include "beast/modules/beast_db/beast_db.h" + // VFALCO 
TODO fix these warnings! #ifdef _MSC_VER //#pragma warning (push) // Causes spurious C4503 "decorated name exceeds maximum length" @@ -102,8 +104,9 @@ namespace ripple #include "node/ripple_NodeObject.h" #include "node/ripple_NodeStore.h" -#include "node/ripple_LevelDBBackendFactory.h" #include "node/ripple_HyperLevelDBBackendFactory.h" +#include "node/ripple_KeyvaDBBackendFactory.h" +#include "node/ripple_LevelDBBackendFactory.h" #include "node/ripple_MdbBackendFactory.h" #include "node/ripple_NullBackendFactory.h" #include "node/ripple_SqliteBackendFactory.h" @@ -154,10 +157,10 @@ namespace ripple #include "src/cpp/ripple/TransactionMaster.h" #include "src/cpp/ripple/ripple_LocalCredentials.h" #include "src/cpp/ripple/WSDoor.h" +#include "src/cpp/ripple/ripple_Application.h" #include "src/cpp/ripple/RPCHandler.h" #include "src/cpp/ripple/TransactionQueue.h" #include "ledger/OrderBookDB.h" -#include "src/cpp/ripple/ripple_Application.h" #include "src/cpp/ripple/CallRPC.h" #include "src/cpp/ripple/Transactor.h" #include "src/cpp/ripple/ChangeTransactor.h" @@ -244,10 +247,11 @@ static const uint64 tenTo17m1 = tenTo17 - 1; #include "basics/ripple_RPCServerHandler.cpp" #include "node/ripple_NodeObject.cpp" #include "node/ripple_NodeStore.cpp" -#include "node/ripple_LevelDBBackendFactory.cpp" #include "node/ripple_HyperLevelDBBackendFactory.cpp" -#include "node/ripple_MdbBackendFactory.cpp" +#include "node/ripple_KeyvaDBBackendFactory.cpp" +#include "node/ripple_LevelDBBackendFactory.cpp" #include "node/ripple_NullBackendFactory.cpp" +#include "node/ripple_MdbBackendFactory.cpp" #include "node/ripple_SqliteBackendFactory.cpp" #include "ledger/Ledger.cpp" @@ -427,7 +431,6 @@ static DH* handleTmpDh (SSL* ssl, int is_export, int iKeyLength) #include "ledger/LedgerUnitTests.cpp" #include "src/cpp/ripple/ripple_SHAMapUnitTests.cpp" #include "src/cpp/ripple/ripple_SHAMapSyncUnitTests.cpp" -#include "src/cpp/ripple/ripple_ProofOfWorkFactoryUnitTests.cpp" // Requires 
ProofOfWorkFactory.h #include "src/cpp/ripple/ripple_SerializedTransactionUnitTests.cpp" //------------------------------------------------------------------------------ diff --git a/modules/ripple_basics/containers/ripple_TaggedCache.h b/modules/ripple_basics/containers/ripple_TaggedCache.h index 1551ebda05..1f3c294887 100644 --- a/modules/ripple_basics/containers/ripple_TaggedCache.h +++ b/modules/ripple_basics/containers/ripple_TaggedCache.h @@ -62,9 +62,75 @@ public: void sweep (); void clear (); - bool touch (const key_type& key); + /** Refresh the expiration time on a key. + + @param key The key to refresh. + @return `true` if the key was found and the object is cached. + */ + bool refreshIfPresent (const key_type& key) + { + bool found = false; + + // If present, make current in cache + boost::recursive_mutex::scoped_lock sl (mLock); + + cache_iterator cit = mCache.find (key); + + if (cit != mCache.end ()) + { + cache_entry& entry = cit->second; + + if (! entry.isCached ()) + { + // Convert weak to strong. + entry.ptr = entry.lock (); + + if (entry.isCached ()) + { + // We just put the object back in cache + ++mCacheCount; + entry.touch (); + found = true; + } + else + { + // Couldn't get strong pointer, + // object fell out of the cache so remove the entry. + mCache.erase (cit); + } + } + else + { + // It's cached so update the timer + entry.touch (); + found = true; + } + } + else + { + // not present + } + + return found; + } + bool del (const key_type& key, bool valid); + + /** Replace aliased objects with originals. + + Due to concurrency it is possible for two separate objects with + the same content and referring to the same unique "thing" to exist. + This routine eliminates the duplicate and performs a replacement + on the callers shared pointer if needed. + + @param key The key corresponding to the object + @param data A shared pointer to the data corresponding to the object. + @param replace `true` if `data` is the up to date version of the object. 
+ + @return `true` if the operation was successful. + */ bool canonicalize (const key_type& key, boost::shared_ptr& data, bool replace = false); + bool store (const key_type& key, const c_Data& data); boost::shared_ptr fetch (const key_type& key); bool retrieve (const key_type& key, c_Data& data); @@ -264,40 +330,6 @@ void TaggedCache::sweep () } } -template -bool TaggedCache::touch (const key_type& key) -{ - // If present, make current in cache - boost::recursive_mutex::scoped_lock sl (mLock); - - cache_iterator cit = mCache.find (key); - - if (cit == mCache.end ()) // Don't have the object - return false; - - cache_entry& entry = cit->second; - - if (entry.isCached ()) - { - entry.touch (); - return true; - } - - entry.ptr = entry.lock (); - - if (entry.isCached ()) - { - // We just put the object back in cache - ++mCacheCount; - entry.touch (); - return true; - } - - // Object fell out - mCache.erase (cit); - return false; -} - template bool TaggedCache::del (const key_type& key, bool valid) { @@ -326,6 +358,7 @@ bool TaggedCache::del (const key_type& key, bool valid) return ret; } +// VFALCO NOTE What does it mean to canonicalize the data? template bool TaggedCache::canonicalize (const key_type& key, boost::shared_ptr& data, bool replace) { diff --git a/modules/ripple_basics/types/ripple_UInt256.h b/modules/ripple_basics/types/ripple_UInt256.h index fa135ee218..4790fea7f3 100644 --- a/modules/ripple_basics/types/ripple_UInt256.h +++ b/modules/ripple_basics/types/ripple_UInt256.h @@ -19,6 +19,10 @@ inline int Testuint256AdHoc (std::vector vArg); // We have to keep a separate base class without constructors // so the compiler will let us use it in a union +// +// VFALCO NOTE This class produces undefined behavior when +// BITS is not a multiple of 32!!! +// template class base_uint { @@ -30,6 +34,22 @@ protected: unsigned int pn[WIDTH]; public: + base_uint () + { + } + + /** Construct from a raw pointer. 
+ + The buffer pointed to by `data` must be at least 32 bytes. + */ + explicit base_uint (void const* data) + { + // BITS must be a multiple of 32 + static_bassert ((BITS % 32) == 0); + + memcpy (&pn [0], data, BITS / 8); + } + bool isZero () const { for (int i = 0; i < WIDTH; i++) @@ -345,14 +365,24 @@ public: return reinterpret_cast (pn + WIDTH); } - const unsigned char* begin () const + unsigned char const* cbegin () const noexcept { - return reinterpret_cast (pn); + return reinterpret_cast (pn); } - const unsigned char* end () const + unsigned char const* cend () const noexcept { - return reinterpret_cast (pn + WIDTH); + return reinterpret_cast (pn + WIDTH); + } + + const unsigned char* begin () const noexcept + { + return cbegin (); + } + + const unsigned char* end () const noexcept + { + return cend (); } unsigned int size () const @@ -474,6 +504,11 @@ public: *this = b; } + explicit uint256 (void const* data) + : base_uint256 (data) + { + } + uint256& operator= (uint64 uHost) { zero (); @@ -590,7 +625,7 @@ template inline std::ostream& operator<< (std::ostream& out, inline int Testuint256AdHoc (std::vector vArg) { - uint256 g (0); + uint256 g (uint64 (0)); printf ("%s\n", g.ToString ().c_str ()); --g; diff --git a/modules/ripple_basics/utility/ripple_IniFile.cpp b/modules/ripple_basics/utility/ripple_IniFile.cpp index 8f60104d83..7c230cb03f 100644 --- a/modules/ripple_basics/utility/ripple_IniFile.cpp +++ b/modules/ripple_basics/utility/ripple_IniFile.cpp @@ -106,7 +106,7 @@ int SectionCount (Section& secSource, const std::string& strSection) { Section::mapped_type* pmtEntries = SectionEntries (secSource, strSection); - return pmtEntries ? -1 : pmtEntries->size (); + return pmtEntries ? 
pmtEntries->size () : 0; } bool SectionSingleB (Section& secSource, const std::string& strSection, std::string& strValue) @@ -128,4 +128,37 @@ bool SectionSingleB (Section& secSource, const std::string& strSection, std::str return bSingle; } -// vim:ts=4 +StringPairArray parseKeyValueSection (Section& secSource, String const& strSection) +{ + StringPairArray result; + + // yuck. + std::string const stdStrSection (strSection.toStdString ()); + + int const count = SectionCount (secSource, stdStrSection); + + typedef Section::mapped_type Entries; + + Entries* const entries = SectionEntries (secSource, stdStrSection); + + if (entries != nullptr) + { + for (Entries::const_iterator iter = entries->begin (); iter != entries->end (); ++iter) + { + String const line (iter->c_str ()); + + int const equalPos = line.indexOfChar ('='); + + if (equalPos != -1) + { + String const key = line.substring (0, equalPos); + String const value = line.substring (equalPos + 1, line.length ()); + + result.set (key, value); + } + } + } + + return result; +} + diff --git a/modules/ripple_basics/utility/ripple_IniFile.h b/modules/ripple_basics/utility/ripple_IniFile.h index fe5327ec89..d8e27abb95 100644 --- a/modules/ripple_basics/utility/ripple_IniFile.h +++ b/modules/ripple_basics/utility/ripple_IniFile.h @@ -20,4 +20,11 @@ bool SectionSingleB (Section& secSource, const std::string& strSection, std::str int SectionCount (Section& secSource, const std::string& strSection); Section::mapped_type* SectionEntries (Section& secSource, const std::string& strSection); +/** Parse a section of lines as a key/value array. + + Each line is in the form =. + Spaces are considered part of the key and value. 
+*/ +StringPairArray parseKeyValueSection (Section& secSource, String const& strSection); + #endif diff --git a/modules/ripple_basics/utility/ripple_ScopedLock.h b/modules/ripple_basics/utility/ripple_ScopedLock.h index a51dd1dd3a..a38a539251 100644 --- a/modules/ripple_basics/utility/ripple_ScopedLock.h +++ b/modules/ripple_basics/utility/ripple_ScopedLock.h @@ -37,7 +37,7 @@ public: }; // A class that unlocks on construction and locks on destruction - +/* class ScopedUnlock { protected: @@ -80,5 +80,6 @@ private: ScopedUnlock (const ScopedUnlock&); // no implementation ScopedUnlock& operator= (const ScopedUnlock&); // no implementation }; +*/ #endif diff --git a/modules/ripple_basics/utility/ripple_StringUtilities.cpp b/modules/ripple_basics/utility/ripple_StringUtilities.cpp index cdc438a6ef..550fab9213 100644 --- a/modules/ripple_basics/utility/ripple_StringUtilities.cpp +++ b/modules/ripple_basics/utility/ripple_StringUtilities.cpp @@ -271,7 +271,7 @@ std::string addressToString (void const* address) return strHex (static_cast (address) - static_cast (0)); } -StringPairArray parseKeyValueParameters (String parameters, beast_wchar delimiter) +StringPairArray parseDelimitedKeyValueString (String parameters, beast_wchar delimiter) { StringPairArray keyValues; @@ -309,4 +309,3 @@ StringPairArray parseKeyValueParameters (String parameters, beast_wchar delimite return keyValues; } - diff --git a/modules/ripple_basics/utility/ripple_StringUtilities.h b/modules/ripple_basics/utility/ripple_StringUtilities.h index dd002a2a23..08dc14c545 100644 --- a/modules/ripple_basics/utility/ripple_StringUtilities.h +++ b/modules/ripple_basics/utility/ripple_StringUtilities.h @@ -214,8 +214,12 @@ bool parseUrl (const std::string& strUrl, std::string& strScheme, std::string& s */ extern std::string addressToString (void const* address); -/** Parse a pipe delimited key/value parameter string. +/** Create a Parameters from a String. 
+ + Parameter strings have the format: + + =['|'=] */ -StringPairArray parseKeyValueParameters (String parameters, beast_wchar delimiter); +extern StringPairArray parseDelimitedKeyValueString (String s, beast_wchar delimiter='|'); #endif diff --git a/modules/ripple_core/functional/ripple_Config.cpp b/modules/ripple_core/functional/ripple_Config.cpp index 896d298280..2706f12923 100644 --- a/modules/ripple_core/functional/ripple_Config.cpp +++ b/modules/ripple_core/functional/ripple_Config.cpp @@ -8,71 +8,6 @@ // TODO: Check permissions on config file before using it. // -// VFALCO TODO Rename and replace these macros with variables. -#define SECTION_ACCOUNT_PROBE_MAX "account_probe_max" -#define SECTION_CLUSTER_NODES "cluster_nodes" -#define SECTION_DATABASE_PATH "database_path" -#define SECTION_DEBUG_LOGFILE "debug_logfile" -#define SECTION_ELB_SUPPORT "elb_support" -#define SECTION_FEE_DEFAULT "fee_default" -#define SECTION_FEE_NICKNAME_CREATE "fee_nickname_create" -#define SECTION_FEE_OFFER "fee_offer" -#define SECTION_FEE_OPERATION "fee_operation" -#define SECTION_FEE_ACCOUNT_RESERVE "fee_account_reserve" -#define SECTION_FEE_OWNER_RESERVE "fee_owner_reserve" -#define SECTION_NODE_DB "node_db" -#define SECTION_FASTNODE_DB "temp_db" -#define SECTION_LEDGER_HISTORY "ledger_history" -#define SECTION_IPS "ips" -#define SECTION_NETWORK_QUORUM "network_quorum" -#define SECTION_NODE_SEED "node_seed" -#define SECTION_NODE_SIZE "node_size" -#define SECTION_PATH_SEARCH_SIZE "path_search_size" -#define SECTION_PEER_CONNECT_LOW_WATER "peer_connect_low_water" -#define SECTION_PEER_IP "peer_ip" -#define SECTION_PEER_PORT "peer_port" -#define SECTION_PEER_PRIVATE "peer_private" -#define SECTION_PEER_SCAN_INTERVAL_MIN "peer_scan_interval_min" -#define SECTION_PEER_SSL_CIPHER_LIST "peer_ssl_cipher_list" -#define SECTION_PEER_START_MAX "peer_start_max" -#define SECTION_RPC_ALLOW_REMOTE "rpc_allow_remote" -#define SECTION_RPC_ADMIN_ALLOW "rpc_admin_allow" -#define 
SECTION_RPC_ADMIN_USER "rpc_admin_user" -#define SECTION_RPC_ADMIN_PASSWORD "rpc_admin_password" -#define SECTION_RPC_IP "rpc_ip" -#define SECTION_RPC_PORT "rpc_port" -#define SECTION_RPC_USER "rpc_user" -#define SECTION_RPC_PASSWORD "rpc_password" -#define SECTION_RPC_STARTUP "rpc_startup" -#define SECTION_RPC_SECURE "rpc_secure" -#define SECTION_RPC_SSL_CERT "rpc_ssl_cert" -#define SECTION_RPC_SSL_CHAIN "rpc_ssl_chain" -#define SECTION_RPC_SSL_KEY "rpc_ssl_key" -#define SECTION_SMS_FROM "sms_from" -#define SECTION_SMS_KEY "sms_key" -#define SECTION_SMS_SECRET "sms_secret" -#define SECTION_SMS_TO "sms_to" -#define SECTION_SMS_URL "sms_url" -#define SECTION_SNTP "sntp_servers" -#define SECTION_SSL_VERIFY "ssl_verify" -#define SECTION_SSL_VERIFY_FILE "ssl_verify_file" -#define SECTION_SSL_VERIFY_DIR "ssl_verify_dir" -#define SECTION_VALIDATORS_FILE "validators_file" -#define SECTION_VALIDATION_QUORUM "validation_quorum" -#define SECTION_VALIDATION_SEED "validation_seed" -#define SECTION_WEBSOCKET_PUBLIC_IP "websocket_public_ip" -#define SECTION_WEBSOCKET_PUBLIC_PORT "websocket_public_port" -#define SECTION_WEBSOCKET_PUBLIC_SECURE "websocket_public_secure" -#define SECTION_WEBSOCKET_PING_FREQ "websocket_ping_frequency" -#define SECTION_WEBSOCKET_IP "websocket_ip" -#define SECTION_WEBSOCKET_PORT "websocket_port" -#define SECTION_WEBSOCKET_SECURE "websocket_secure" -#define SECTION_WEBSOCKET_SSL_CERT "websocket_ssl_cert" -#define SECTION_WEBSOCKET_SSL_CHAIN "websocket_ssl_chain" -#define SECTION_WEBSOCKET_SSL_KEY "websocket_ssl_key" -#define SECTION_VALIDATORS "validators" -#define SECTION_VALIDATORS_SITE "validators_site" - // Fees are in XRP. 
#define DEFAULT_FEE_DEFAULT 10 #define DEFAULT_FEE_ACCOUNT_RESERVE 200*SYSTEM_CURRENCY_PARTS @@ -81,6 +16,8 @@ #define DEFAULT_FEE_OFFER DEFAULT_FEE_DEFAULT #define DEFAULT_FEE_OPERATION 1 +// VFALCO TODO Convert this to a SharedSingleton to prevent exit leaks +// Config theConfig; void Config::setup (const std::string& strConf, bool bTestNet, bool bQuiet) @@ -373,8 +310,23 @@ void Config::load () (void) SectionSingleB (secConfig, SECTION_RPC_IP, m_rpcIP); (void) SectionSingleB (secConfig, SECTION_RPC_PASSWORD, RPC_PASSWORD); (void) SectionSingleB (secConfig, SECTION_RPC_USER, RPC_USER); - (void) SectionSingleB (secConfig, SECTION_NODE_DB, NODE_DB); - (void) SectionSingleB (secConfig, SECTION_FASTNODE_DB, FASTNODE_DB); + + //--------------------------------------- + // + // VFALCO BEGIN CLEAN + // + theConfig.nodeDatabase = parseKeyValueSection ( + secConfig, ConfigSection::nodeDatabase ()); + + theConfig.ephemeralNodeDatabase = parseKeyValueSection ( + secConfig, ConfigSection::tempNodeDatabase ()); + + theConfig.importNodeDatabase = parseKeyValueSection ( + secConfig, ConfigSection::importNodeDatabase ()); + // + // VFALCO END CLEAN + // + //--------------------------------------- if (SectionSingleB (secConfig, SECTION_RPC_PORT, strTemp)) m_rpcPort = boost::lexical_cast (strTemp); diff --git a/modules/ripple_core/functional/ripple_Config.h b/modules/ripple_core/functional/ripple_Config.h index 47ecf9a5b9..6f1cfc9bc9 100644 --- a/modules/ripple_core/functional/ripple_Config.h +++ b/modules/ripple_core/functional/ripple_Config.h @@ -84,9 +84,41 @@ public: boost::filesystem::path DATA_DIR; boost::filesystem::path DEBUG_LOGFILE; boost::filesystem::path VALIDATORS_FILE; // As specifed in rippled.cfg. - std::string NODE_DB; // Database to use for nodes - std::string FASTNODE_DB; // Database for temporary storage - std::string DB_IMPORT; // Import from old DB + + /** Parameters for the main NodeStore database. 
+ + This is 1 or more strings of the form = + The 'type' and 'path' keys are required, see rippled-example.cfg + + @see NodeStore + */ + StringPairArray nodeDatabase; + + /** Parameters for the ephemeral NodeStore database. + + This is an auxiliary database for the NodeStore, usually placed + on a separate faster volume. However, the volume data may not persist + between launches. Use of the ephemeral database is optional. + + The format is the same as that for @ref nodeDatabase + + @see NodeStore + */ + StringPairArray ephemeralNodeDatabase; + + /** Parameters for importing an old database in to the current node database. + + If this is not empty, then it specifies the key/value parameters for + another node database from which to import all data into the current + node database specified by @ref nodeDatabase. + + The format of this string is in the form: + '='['|''='value] + + @see parseDelimitedKeyValueString + */ + StringPairArray importNodeDatabase; + bool ELB_SUPPORT; // Support Amazon ELB std::string VALIDATORS_SITE; // Where to find validators.txt on the Internet. diff --git a/modules/ripple_core/functional/ripple_ConfigSections.h b/modules/ripple_core/functional/ripple_ConfigSections.h new file mode 100644 index 0000000000..445137ae49 --- /dev/null +++ b/modules/ripple_core/functional/ripple_ConfigSections.h @@ -0,0 +1,86 @@ +//------------------------------------------------------------------------------ +/* + Copyright (c) 2011-2013, OpenCoin, Inc. +*/ +//============================================================================== + +#ifndef RIPPLE_CONFIGSECTIONS_H_INCLUDED +#define RIPPLE_CONFIGSECTIONS_H_INCLUDED + +// VFALCO NOTE +// +// Please use this style for all new sections +// And if you're feeling generous, convert all the +// existing macros to this format as well. 
+// +struct ConfigSection +{ + static String nodeDatabase () { return "node_db"; } + static String tempNodeDatabase () { return "temp_db"; } + static String importNodeDatabase () { return "import_db"; } +}; + +// VFALCO TODO Rename and replace these macros with variables. +#define SECTION_ACCOUNT_PROBE_MAX "account_probe_max" +#define SECTION_CLUSTER_NODES "cluster_nodes" +#define SECTION_DATABASE_PATH "database_path" +#define SECTION_DEBUG_LOGFILE "debug_logfile" +#define SECTION_ELB_SUPPORT "elb_support" +#define SECTION_FEE_DEFAULT "fee_default" +#define SECTION_FEE_NICKNAME_CREATE "fee_nickname_create" +#define SECTION_FEE_OFFER "fee_offer" +#define SECTION_FEE_OPERATION "fee_operation" +#define SECTION_FEE_ACCOUNT_RESERVE "fee_account_reserve" +#define SECTION_FEE_OWNER_RESERVE "fee_owner_reserve" +#define SECTION_LEDGER_HISTORY "ledger_history" +#define SECTION_IPS "ips" +#define SECTION_NETWORK_QUORUM "network_quorum" +#define SECTION_NODE_SEED "node_seed" +#define SECTION_NODE_SIZE "node_size" +#define SECTION_PATH_SEARCH_SIZE "path_search_size" +#define SECTION_PEER_CONNECT_LOW_WATER "peer_connect_low_water" +#define SECTION_PEER_IP "peer_ip" +#define SECTION_PEER_PORT "peer_port" +#define SECTION_PEER_PRIVATE "peer_private" +#define SECTION_PEER_SCAN_INTERVAL_MIN "peer_scan_interval_min" +#define SECTION_PEER_SSL_CIPHER_LIST "peer_ssl_cipher_list" +#define SECTION_PEER_START_MAX "peer_start_max" +#define SECTION_RPC_ALLOW_REMOTE "rpc_allow_remote" +#define SECTION_RPC_ADMIN_ALLOW "rpc_admin_allow" +#define SECTION_RPC_ADMIN_USER "rpc_admin_user" +#define SECTION_RPC_ADMIN_PASSWORD "rpc_admin_password" +#define SECTION_RPC_IP "rpc_ip" +#define SECTION_RPC_PORT "rpc_port" +#define SECTION_RPC_USER "rpc_user" +#define SECTION_RPC_PASSWORD "rpc_password" +#define SECTION_RPC_STARTUP "rpc_startup" +#define SECTION_RPC_SECURE "rpc_secure" +#define SECTION_RPC_SSL_CERT "rpc_ssl_cert" +#define SECTION_RPC_SSL_CHAIN "rpc_ssl_chain" +#define SECTION_RPC_SSL_KEY 
"rpc_ssl_key" +#define SECTION_SMS_FROM "sms_from" +#define SECTION_SMS_KEY "sms_key" +#define SECTION_SMS_SECRET "sms_secret" +#define SECTION_SMS_TO "sms_to" +#define SECTION_SMS_URL "sms_url" +#define SECTION_SNTP "sntp_servers" +#define SECTION_SSL_VERIFY "ssl_verify" +#define SECTION_SSL_VERIFY_FILE "ssl_verify_file" +#define SECTION_SSL_VERIFY_DIR "ssl_verify_dir" +#define SECTION_VALIDATORS_FILE "validators_file" +#define SECTION_VALIDATION_QUORUM "validation_quorum" +#define SECTION_VALIDATION_SEED "validation_seed" +#define SECTION_WEBSOCKET_PUBLIC_IP "websocket_public_ip" +#define SECTION_WEBSOCKET_PUBLIC_PORT "websocket_public_port" +#define SECTION_WEBSOCKET_PUBLIC_SECURE "websocket_public_secure" +#define SECTION_WEBSOCKET_PING_FREQ "websocket_ping_frequency" +#define SECTION_WEBSOCKET_IP "websocket_ip" +#define SECTION_WEBSOCKET_PORT "websocket_port" +#define SECTION_WEBSOCKET_SECURE "websocket_secure" +#define SECTION_WEBSOCKET_SSL_CERT "websocket_ssl_cert" +#define SECTION_WEBSOCKET_SSL_CHAIN "websocket_ssl_chain" +#define SECTION_WEBSOCKET_SSL_KEY "websocket_ssl_key" +#define SECTION_VALIDATORS "validators" +#define SECTION_VALIDATORS_SITE "validators_site" + +#endif diff --git a/modules/ripple_core/ripple_core.h b/modules/ripple_core/ripple_core.h index 4b8b13a488..e3849298c7 100644 --- a/modules/ripple_core/ripple_core.h +++ b/modules/ripple_core/ripple_core.h @@ -30,6 +30,7 @@ namespace ripple // VFALCO NOTE Indentation shows dependency hierarchy // +/***/#include "functional/ripple_ConfigSections.h" /**/#include "functional/ripple_Config.h" /**/#include "functional/ripple_ILoadFeeTrack.h" /*..*/#include "functional/ripple_LoadEvent.h" diff --git a/modules/ripple_core/validator/ripple_Validators.cpp b/modules/ripple_core/validator/ripple_Validators.cpp index e722d0b624..60eaed5279 100644 --- a/modules/ripple_core/validator/ripple_Validators.cpp +++ b/modules/ripple_core/validator/ripple_Validators.cpp @@ -402,7 +402,7 @@ private: class 
ValidatorListTests : public UnitTest { public: - ValidatorListTests () : UnitTest ("ValidatorList") + ValidatorListTests () : UnitTest ("ValidatorList", "ripple") { } diff --git a/modules/ripple_mdb/ripple_mdb.c b/modules/ripple_mdb/ripple_mdb.c index 8f66bbcf83..53d03f11fa 100644 --- a/modules/ripple_mdb/ripple_mdb.c +++ b/modules/ripple_mdb/ripple_mdb.c @@ -12,7 +12,7 @@ #if RIPPLE_MDB_AVAILABLE -#include "libraries/liblmdb/mdb.c" -#include "libraries/liblmdb/midl.c" +#include "mdb/libraries/liblmdb/mdb.c" +#include "mdb/libraries/liblmdb/midl.c" #endif diff --git a/modules/ripple_mdb/ripple_mdb.h b/modules/ripple_mdb/ripple_mdb.h index faefb502b3..03efbc4eb7 100644 --- a/modules/ripple_mdb/ripple_mdb.h +++ b/modules/ripple_mdb/ripple_mdb.h @@ -12,7 +12,7 @@ #if ! BEAST_WIN32 #define RIPPLE_MDB_AVAILABLE 1 -#include "libraries/liblmdb/lmdb.h" +#include "mdb/libraries/liblmdb/lmdb.h" #else // mdb is unsupported on Win32 diff --git a/rippled-example.cfg b/rippled-example.cfg index a4173c6ced..7d59091afb 100644 --- a/rippled-example.cfg +++ b/rippled-example.cfg @@ -222,27 +222,53 @@ # Examples: RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE # shfArahZT9Q9ckTf3s1psJ7C7qzVN # +# +#------------------------------------------------------------------------------- +# # [node_db] # [temp_db] -# Set the choice of databases for storing Node objects. +# [import_db] +# +# Set database options for storing node objects in the primary database, +# caching node objects in the temporary database, or importing node objects +# from a previous database. +# # Format (without spaces): -# '=' [ '|' '=' ]... +# One or more lines of key / value pairs: +# '=' +# ... 
+# # Examples: -# type=HyperLevelDB|path=db/hashnode -# Choices for 'type': +# type=HyperLevelDB +# path=db/hashnode +# +# Choices for 'type' (not case-sensitive) # HyperLevelDB Use an improved version of LevelDB (preferred) # LevelDB Use Google's LevelDB database (deprecated) # MDB Use MDB +# none Use no backend # KeyvaDB Use OpenCoin's KeyvaDB (experimental) +# SQLite Use SQLite +# # Required keys: # path Location to store the database (all types) +# # Optional keys: -# ... -# Notes +# (none yet) +# +# Notes: +# # The 'node_db' entry configures the primary, persistent storage. +# # The 'temp_db' configures a look-aside cache for high volume storage # which doesn't necessarily persist between server launches. # +# The 'import_db' is used with the '--import' command line option to +# migrate the specified database into the current database given +# in the [node_db] section. +# +#------------------------------------------------------------------------------- +# # [node_size] # Tunes the servers based on the expected load and available memory. Legal # sizes are "tiny", "small", "medium", "large", and "huge". We recommend @@ -316,8 +342,9 @@ [node_size] medium -[node_db] -type=mdb|path=db +#[node_db] +#type=HyperLevelDB +#path=hyperldb [debug_logfile] log/debug.log diff --git a/src/cpp/ripple/NetworkOPs.cpp b/src/cpp/ripple/NetworkOPs.cpp index dc16a90831..7c2a410e5b 100644 --- a/src/cpp/ripple/NetworkOPs.cpp +++ b/src/cpp/ripple/NetworkOPs.cpp @@ -42,47 +42,52 @@ NetworkOPs::NetworkOPs (LedgerMaster* pLedgerMaster) void NetworkOPs::processNetTimer () { - ScopedLock sl (getApp().getMasterLock ()); - - getApp().getLoadManager ().resetDeadlockDetector (); - - std::size_t const numPeers = getApp().getPeers ().getPeerVector ().size (); - - // do we have sufficient peers? If not, we are disconnected. 
- if (numPeers < theConfig.NETWORK_QUORUM) { - if (mMode != omDISCONNECTED) + Application::ScopedLockType lock (getApp().getMasterLock (), __FILE__, __LINE__); + + // VFALCO NOTE This is for diagnosing a crash on exit + Application& app (getApp ()); + ILoadManager& mgr (app.getLoadManager ()); + mgr.resetDeadlockDetector (); + + std::size_t const numPeers = getApp().getPeers ().getPeerVector ().size (); + + // do we have sufficient peers? If not, we are disconnected. + if (numPeers < theConfig.NETWORK_QUORUM) { - setMode (omDISCONNECTED); - WriteLog (lsWARNING, NetworkOPs) - << "Node count (" << numPeers << ") " - << "has fallen below quorum (" << theConfig.NETWORK_QUORUM << ")."; + if (mMode != omDISCONNECTED) + { + setMode (omDISCONNECTED); + WriteLog (lsWARNING, NetworkOPs) + << "Node count (" << numPeers << ") " + << "has fallen below quorum (" << theConfig.NETWORK_QUORUM << ")."; + } + + return; } - return; - } + if (mMode == omDISCONNECTED) + { + setMode (omCONNECTED); + WriteLog (lsINFO, NetworkOPs) << "Node count (" << numPeers << ") is sufficient."; + } - if (mMode == omDISCONNECTED) - { - setMode (omCONNECTED); - WriteLog (lsINFO, NetworkOPs) << "Node count (" << numPeers << ") is sufficient."; - } + // Check if the last validated ledger forces a change between these states + if (mMode == omSYNCING) + { + setMode (omSYNCING); + } + else if (mMode == omCONNECTED) + { + setMode (omCONNECTED); + } - // Check if the last validated ledger forces a change between these states - if (mMode == omSYNCING) - { - setMode (omSYNCING); - } - else if (mMode == omCONNECTED) - { - setMode (omCONNECTED); - } + if (!mConsensus) + tryStartConsensus (); - if (!mConsensus) - tryStartConsensus (); - - if (mConsensus) - mConsensus->timerEntry (); + if (mConsensus) + mConsensus->timerEntry (); + } } void NetworkOPs::onDeadlineTimer (DeadlineTimer& timer) @@ -306,71 +311,72 @@ void NetworkOPs::runTransactionQueue () { LoadEvent::autoptr ev = getApp().getJobQueue ().getLoadEventAP 
(jtTXN_PROC, "runTxnQ"); - boost::recursive_mutex::scoped_lock sl (getApp().getMasterLock ()); - - Transaction::pointer dbtx = getApp().getMasterTransaction ().fetch (txn->getID (), true); - assert (dbtx); - - bool didApply; - TER r = mLedgerMaster->doTransaction (dbtx->getSTransaction (), - tapOPEN_LEDGER | tapNO_CHECK_SIGN, didApply); - dbtx->setResult (r); - - if (isTemMalformed (r)) // malformed, cache bad - getApp().getHashRouter ().setFlag (txn->getID (), SF_BAD); -// else if (isTelLocal (r) || isTerRetry (r)) // can be retried -// getApp().getHashRouter ().setFlag (txn->getID (), SF_RETRY); - - - if (isTerRetry (r)) { - // transaction should be held - WriteLog (lsDEBUG, NetworkOPs) << "QTransaction should be held: " << r; - dbtx->setStatus (HELD); - getApp().getMasterTransaction ().canonicalize (dbtx); - mLedgerMaster->addHeldTransaction (dbtx); - } - else if (r == tefPAST_SEQ) - { - // duplicate or conflict - WriteLog (lsINFO, NetworkOPs) << "QTransaction is obsolete"; - dbtx->setStatus (OBSOLETE); - } - else if (r == tesSUCCESS) - { - WriteLog (lsINFO, NetworkOPs) << "QTransaction is now included in open ledger"; - dbtx->setStatus (INCLUDED); - getApp().getMasterTransaction ().canonicalize (dbtx); - } - else - { - WriteLog (lsDEBUG, NetworkOPs) << "QStatus other than success " << r; - dbtx->setStatus (INVALID); - } + Application::ScopedLockType lock (getApp().getMasterLock (), __FILE__, __LINE__); -// if (didApply || (mMode != omFULL)) - if (didApply) - { - std::set peers; + Transaction::pointer dbtx = getApp().getMasterTransaction ().fetch (txn->getID (), true); + assert (dbtx); - if (getApp().getHashRouter ().swapSet (txn->getID (), peers, SF_RELAYED)) + bool didApply; + TER r = mLedgerMaster->doTransaction (dbtx->getSTransaction (), + tapOPEN_LEDGER | tapNO_CHECK_SIGN, didApply); + dbtx->setResult (r); + + if (isTemMalformed (r)) // malformed, cache bad + getApp().getHashRouter ().setFlag (txn->getID (), SF_BAD); + // else if (isTelLocal (r) || 
isTerRetry (r)) // can be retried + // getApp().getHashRouter ().setFlag (txn->getID (), SF_RETRY); + + + if (isTerRetry (r)) { - WriteLog (lsDEBUG, NetworkOPs) << "relaying"; - protocol::TMTransaction tx; - Serializer s; - dbtx->getSTransaction ()->add (s); - tx.set_rawtransaction (&s.getData ().front (), s.getLength ()); - tx.set_status (protocol::tsCURRENT); - tx.set_receivetimestamp (getNetworkTimeNC ()); // FIXME: This should be when we received it - - PackedMessage::pointer packet = boost::make_shared (tx, protocol::mtTRANSACTION); - getApp().getPeers ().relayMessageBut (peers, packet); + // transaction should be held + WriteLog (lsDEBUG, NetworkOPs) << "QTransaction should be held: " << r; + dbtx->setStatus (HELD); + getApp().getMasterTransaction ().canonicalize (dbtx); + mLedgerMaster->addHeldTransaction (dbtx); + } + else if (r == tefPAST_SEQ) + { + // duplicate or conflict + WriteLog (lsINFO, NetworkOPs) << "QTransaction is obsolete"; + dbtx->setStatus (OBSOLETE); + } + else if (r == tesSUCCESS) + { + WriteLog (lsINFO, NetworkOPs) << "QTransaction is now included in open ledger"; + dbtx->setStatus (INCLUDED); + getApp().getMasterTransaction ().canonicalize (dbtx); } else - WriteLog(lsDEBUG, NetworkOPs) << "recently relayed"; - } + { + WriteLog (lsDEBUG, NetworkOPs) << "QStatus other than success " << r; + dbtx->setStatus (INVALID); + } - txn->doCallbacks (r); + if (didApply /*|| (mMode != omFULL)*/ ) + { + std::set peers; + + if (getApp().getHashRouter ().swapSet (txn->getID (), peers, SF_RELAYED)) + { + WriteLog (lsDEBUG, NetworkOPs) << "relaying"; + protocol::TMTransaction tx; + Serializer s; + dbtx->getSTransaction ()->add (s); + tx.set_rawtransaction (&s.getData ().front (), s.getLength ()); + tx.set_status (protocol::tsCURRENT); + tx.set_receivetimestamp (getNetworkTimeNC ()); // FIXME: This should be when we received it + + PackedMessage::pointer packet = boost::make_shared (tx, protocol::mtTRANSACTION); + getApp().getPeers ().relayMessageBut 
(peers, packet); + } + else + WriteLog(lsDEBUG, NetworkOPs) << "recently relayed"; + } + + txn->doCallbacks (r); + } } } @@ -407,77 +413,79 @@ Transaction::pointer NetworkOPs::processTransaction (Transaction::pointer trans, getApp().getHashRouter ().setFlag (trans->getID (), SF_SIGGOOD); } - boost::recursive_mutex::scoped_lock sl (getApp().getMasterLock ()); - bool didApply; - TER r = mLedgerMaster->doTransaction (trans->getSTransaction (), - bAdmin ? (tapOPEN_LEDGER | tapNO_CHECK_SIGN | tapADMIN) : (tapOPEN_LEDGER | tapNO_CHECK_SIGN), didApply); - trans->setResult (r); + { + Application::ScopedLockType lock (getApp().getMasterLock (), __FILE__, __LINE__); - if (isTemMalformed (r)) // malformed, cache bad - getApp().getHashRouter ().setFlag (trans->getID (), SF_BAD); -// else if (isTelLocal (r) || isTerRetry (r)) // can be retried -// getApp().getHashRouter ().setFlag (trans->getID (), SF_RETRY); + bool didApply; + TER r = mLedgerMaster->doTransaction (trans->getSTransaction (), + bAdmin ? 
(tapOPEN_LEDGER | tapNO_CHECK_SIGN | tapADMIN) : (tapOPEN_LEDGER | tapNO_CHECK_SIGN), didApply); + trans->setResult (r); + + if (isTemMalformed (r)) // malformed, cache bad + getApp().getHashRouter ().setFlag (trans->getID (), SF_BAD); + // else if (isTelLocal (r) || isTerRetry (r)) // can be retried + // getApp().getHashRouter ().setFlag (trans->getID (), SF_RETRY); #ifdef BEAST_DEBUG - - if (r != tesSUCCESS) - { - std::string token, human; - CondLog (transResultInfo (r, token, human), lsINFO, NetworkOPs) << "TransactionResult: " << token << ": " << human; - } + if (r != tesSUCCESS) + { + std::string token, human; + CondLog (transResultInfo (r, token, human), lsINFO, NetworkOPs) << "TransactionResult: " << token << ": " << human; + } #endif - if (callback) - callback (trans, r); + if (callback) + callback (trans, r); - if (r == tefFAILURE) - throw Fault (IO_ERROR); + if (r == tefFAILURE) + throw Fault (IO_ERROR); - if (r == tesSUCCESS) - { - WriteLog (lsINFO, NetworkOPs) << "Transaction is now included in open ledger"; - trans->setStatus (INCLUDED); - getApp().getMasterTransaction ().canonicalize (trans); - } - else if (r == tefPAST_SEQ) - { - // duplicate or conflict - WriteLog (lsINFO, NetworkOPs) << "Transaction is obsolete"; - trans->setStatus (OBSOLETE); - } - else if (isTerRetry (r)) - { - if (!bFailHard) + if (r == tesSUCCESS) { - // transaction should be held - WriteLog (lsDEBUG, NetworkOPs) << "Transaction should be held: " << r; - trans->setStatus (HELD); - getApp().getMasterTransaction ().canonicalize (trans); - mLedgerMaster->addHeldTransaction (trans); + WriteLog (lsINFO, NetworkOPs) << "Transaction is now included in open ledger"; + trans->setStatus (INCLUDED); + getApp().getMasterTransaction ().canonicalize (trans); } - } - else - { - WriteLog (lsDEBUG, NetworkOPs) << "Status other than success " << r; - trans->setStatus (INVALID); - } - - if (didApply || ((mMode != omFULL) && !bFailHard)) - { - std::set peers; - - if (getApp().getHashRouter 
().swapSet (trans->getID (), peers, SF_RELAYED)) + else if (r == tefPAST_SEQ) { - protocol::TMTransaction tx; - Serializer s; - trans->getSTransaction ()->add (s); - tx.set_rawtransaction (&s.getData ().front (), s.getLength ()); - tx.set_status (protocol::tsCURRENT); - tx.set_receivetimestamp (getNetworkTimeNC ()); // FIXME: This should be when we received it + // duplicate or conflict + WriteLog (lsINFO, NetworkOPs) << "Transaction is obsolete"; + trans->setStatus (OBSOLETE); + } + else if (isTerRetry (r)) + { + if (!bFailHard) + { + // transaction should be held + WriteLog (lsDEBUG, NetworkOPs) << "Transaction should be held: " << r; + trans->setStatus (HELD); + getApp().getMasterTransaction ().canonicalize (trans); + mLedgerMaster->addHeldTransaction (trans); + } + } + else + { + WriteLog (lsDEBUG, NetworkOPs) << "Status other than success " << r; + trans->setStatus (INVALID); + } - PackedMessage::pointer packet = boost::make_shared (tx, protocol::mtTRANSACTION); - getApp().getPeers ().relayMessageBut (peers, packet); + if (didApply || ((mMode != omFULL) && !bFailHard)) + { + std::set peers; + + if (getApp().getHashRouter ().swapSet (trans->getID (), peers, SF_RELAYED)) + { + protocol::TMTransaction tx; + Serializer s; + trans->getSTransaction ()->add (s); + tx.set_rawtransaction (&s.getData ().front (), s.getLength ()); + tx.set_status (protocol::tsCURRENT); + tx.set_receivetimestamp (getNetworkTimeNC ()); // FIXME: This should be when we received it + + PackedMessage::pointer packet = boost::make_shared (tx, protocol::mtTRANSACTION); + getApp().getPeers ().relayMessageBut (peers, packet); + } } } @@ -955,49 +963,53 @@ uint256 NetworkOPs::getConsensusLCL () void NetworkOPs::processTrustedProposal (LedgerProposal::pointer proposal, boost::shared_ptr set, RippleAddress nodePublic, uint256 checkLedger, bool sigGood) { - boost::recursive_mutex::scoped_lock sl (getApp().getMasterLock ()); - - bool relay = true; - - if (!haveConsensusObject ()) { - WriteLog (lsINFO, 
NetworkOPs) << "Received proposal outside consensus window"; + Application::ScopedLockType lock (getApp().getMasterLock (), __FILE__, __LINE__); - if (mMode == omFULL) - relay = false; - } - else - { - storeProposal (proposal, nodePublic); + bool relay = true; - uint256 consensusLCL = mConsensus->getLCL (); - - if (!set->has_previousledger () && (checkLedger != consensusLCL)) + if (!haveConsensusObject ()) { - WriteLog (lsWARNING, NetworkOPs) << "Have to re-check proposal signature due to consensus view change"; - assert (proposal->hasSignature ()); - proposal->setPrevLedger (consensusLCL); + WriteLog (lsINFO, NetworkOPs) << "Received proposal outside consensus window"; - if (proposal->checkSign ()) - sigGood = true; + if (mMode == omFULL) + relay = false; + } + else + { + storeProposal (proposal, nodePublic); + + uint256 consensusLCL = mConsensus->getLCL (); + + if (!set->has_previousledger () && (checkLedger != consensusLCL)) + { + WriteLog (lsWARNING, NetworkOPs) << "Have to re-check proposal signature due to consensus view change"; + assert (proposal->hasSignature ()); + proposal->setPrevLedger (consensusLCL); + + if (proposal->checkSign ()) + sigGood = true; + } + + if (sigGood && (consensusLCL == proposal->getPrevLedger ())) + { + relay = mConsensus->peerPosition (proposal); + WriteLog (lsTRACE, NetworkOPs) << "Proposal processing finished, relay=" << relay; + } } - if (sigGood && (consensusLCL == proposal->getPrevLedger ())) + if (relay) { - relay = mConsensus->peerPosition (proposal); - WriteLog (lsTRACE, NetworkOPs) << "Proposal processing finished, relay=" << relay; + std::set peers; + getApp().getHashRouter ().swapSet (proposal->getHashRouter (), peers, SF_RELAYED); + PackedMessage::pointer message = boost::make_shared (*set, protocol::mtPROPOSE_LEDGER); + getApp().getPeers ().relayMessageBut (peers, message); + } + else + { + WriteLog (lsINFO, NetworkOPs) << "Not relaying trusted proposal"; } } - - if (relay) - { - std::set peers; - 
getApp().getHashRouter ().swapSet (proposal->getHashRouter (), peers, SF_RELAYED); - PackedMessage::pointer message = boost::make_shared (*set, protocol::mtPROPOSE_LEDGER); - getApp().getPeers ().relayMessageBut (peers, message); - } - else - WriteLog (lsINFO, NetworkOPs) << "Not relaying trusted proposal"; } SHAMap::pointer NetworkOPs::getTXMap (uint256 const& hash) @@ -1039,8 +1051,10 @@ SHAMapAddNode NetworkOPs::gotTXData (const boost::shared_ptr& peer, uint25 { boost::shared_ptr consensus; + { - ScopedLock mlh(getApp().getMasterLock()); + Application::ScopedLockType lock (getApp ().getMasterLock (), __FILE__, __LINE__); + consensus = mConsensus; } diff --git a/src/cpp/ripple/RPCHandler.cpp b/src/cpp/ripple/RPCHandler.cpp index 83add283a3..b2cf6e6d06 100644 --- a/src/cpp/ripple/RPCHandler.cpp +++ b/src/cpp/ripple/RPCHandler.cpp @@ -58,7 +58,7 @@ RPCHandler::RPCHandler (NetworkOPs* netOps, InfoSub::pointer infoSub) : mNetOps ; } -Json::Value RPCHandler::transactionSign (Json::Value params, bool bSubmit, bool bFailHard, ScopedLock& mlh) +Json::Value RPCHandler::transactionSign (Json::Value params, bool bSubmit, bool bFailHard, Application::ScopedLockType& mlh) { if (getApp().getFeeTrack().isLoadedCluster() && (mRole != ADMIN)) return rpcError(rpcTOO_BUSY); @@ -595,7 +595,7 @@ Json::Value RPCHandler::accountFromString (Ledger::ref lrLedger, RippleAddress& // ledger_hash : // ledger_index : // } -Json::Value RPCHandler::doAccountInfo (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doAccountInfo (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { Ledger::pointer lpLedger; Json::Value jvResult = lookupLedger (params, lpLedger); @@ -640,7 +640,7 @@ Json::Value RPCHandler::doAccountInfo (Json::Value params, LoadType* loadType, S // port: // } // XXX Might allow domain for manual connections. 
-Json::Value RPCHandler::doConnect (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doConnect (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { if (theConfig.RUN_STANDALONE) return "cannot connect in standalone mode"; @@ -661,7 +661,7 @@ Json::Value RPCHandler::doConnect (Json::Value params, LoadType* loadType, Scope // { // key: // } -Json::Value RPCHandler::doDataDelete (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doDataDelete (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { if (!params.isMember ("key")) return rpcError (rpcINVALID_PARAMS); @@ -687,7 +687,7 @@ Json::Value RPCHandler::doDataDelete (Json::Value params, LoadType* loadType, Sc // { // key: // } -Json::Value RPCHandler::doDataFetch (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doDataFetch (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { if (!params.isMember ("key")) return rpcError (rpcINVALID_PARAMS); @@ -711,7 +711,7 @@ Json::Value RPCHandler::doDataFetch (Json::Value params, LoadType* loadType, Sco // key: // value: // } -Json::Value RPCHandler::doDataStore (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doDataStore (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { if (!params.isMember ("key") || !params.isMember ("value")) @@ -772,7 +772,7 @@ Json::Value RPCHandler::doNicknameInfo (Json::Value params) // 'account_index' : // optional // } // XXX This would be better if it took the ledger. 
-Json::Value RPCHandler::doOwnerInfo (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doOwnerInfo (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { if (!params.isMember ("account") && !params.isMember ("ident")) return rpcError (rpcINVALID_PARAMS); @@ -797,7 +797,7 @@ Json::Value RPCHandler::doOwnerInfo (Json::Value params, LoadType* loadType, Sco return ret; } -Json::Value RPCHandler::doPeers (Json::Value, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doPeers (Json::Value, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { Json::Value jvResult (Json::objectValue); @@ -808,7 +808,7 @@ Json::Value RPCHandler::doPeers (Json::Value, LoadType* loadType, ScopedLock& Ma return jvResult; } -Json::Value RPCHandler::doPing (Json::Value, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doPing (Json::Value, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { return Json::Value (Json::objectValue); } @@ -818,7 +818,7 @@ Json::Value RPCHandler::doPing (Json::Value, LoadType* loadType, ScopedLock& Mas // issuer is the offering account // --> submit: 'submit|true|false': defaults to false // Prior to running allow each to have a credit line of what they will be getting from the other account. 
-Json::Value RPCHandler::doProfile (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doProfile (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { /* need to fix now that sharedOfferCreate is gone int iArgs = params.size(); @@ -910,9 +910,9 @@ Json::Value RPCHandler::doProfile (Json::Value params, LoadType* loadType, Scope // difficulty: // optional // secret: // optional // } -Json::Value RPCHandler::doProofCreate (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doProofCreate (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { - MasterLockHolder.unlock (); + masterLockHolder.unlock (); // XXX: Add ability to create proof with arbitrary time Json::Value jvResult (Json::objectValue); @@ -955,9 +955,9 @@ Json::Value RPCHandler::doProofCreate (Json::Value params, LoadType* loadType, S // { // token: // } -Json::Value RPCHandler::doProofSolve (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doProofSolve (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { - MasterLockHolder.unlock (); + masterLockHolder.unlock (); Json::Value jvResult; @@ -985,9 +985,9 @@ Json::Value RPCHandler::doProofSolve (Json::Value params, LoadType* loadType, Sc // difficulty: // optional // secret: // optional // } -Json::Value RPCHandler::doProofVerify (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doProofVerify (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { - MasterLockHolder.unlock (); + masterLockHolder.unlock (); // XXX Add ability to check proof against arbitrary time Json::Value jvResult; @@ -1055,7 +1055,7 @@ Json::Value RPCHandler::doProofVerify (Json::Value params, LoadType* loadType, S // ledger_hash : // ledger_index : // } -Json::Value 
RPCHandler::doAccountLines (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doAccountLines (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { Ledger::pointer lpLedger; Json::Value jvResult = lookupLedger (params, lpLedger); @@ -1067,7 +1067,7 @@ Json::Value RPCHandler::doAccountLines (Json::Value params, LoadType* loadType, if (lpLedger->isImmutable ()) { - MasterLockHolder.unlock (); + masterLockHolder.unlock (); bUnlocked = true; } @@ -1140,7 +1140,7 @@ Json::Value RPCHandler::doAccountLines (Json::Value params, LoadType* loadType, } if (!bUnlocked) - MasterLockHolder.unlock (); + masterLockHolder.unlock (); } else { @@ -1168,7 +1168,7 @@ static void offerAdder (Json::Value& jvLines, SLE::ref offer) // ledger_hash : // ledger_index : // } -Json::Value RPCHandler::doAccountOffers (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doAccountOffers (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { Ledger::pointer lpLedger; Json::Value jvResult = lookupLedger (params, lpLedger); @@ -1180,7 +1180,7 @@ Json::Value RPCHandler::doAccountOffers (Json::Value params, LoadType* loadType, if (lpLedger->isImmutable ()) { - MasterLockHolder.unlock (); + masterLockHolder.unlock (); bUnlocked = true; } @@ -1212,7 +1212,7 @@ Json::Value RPCHandler::doAccountOffers (Json::Value params, LoadType* loadType, lpLedger->visitAccountItems (raAccount.getAccountID (), BIND_TYPE (&offerAdder, boost::ref (jvsOffers), P_1)); if (!bUnlocked) - MasterLockHolder.unlock (); + masterLockHolder.unlock (); return jvResult; } @@ -1227,7 +1227,7 @@ Json::Value RPCHandler::doAccountOffers (Json::Value params, LoadType* loadType, // "limit" : integer, // Optional. // "proof" : boolean // Defaults to false. 
// } -Json::Value RPCHandler::doBookOffers (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doBookOffers (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { if (getApp().getJobQueue ().getJobCountGE (jtCLIENT) > 200) { @@ -1241,7 +1241,7 @@ Json::Value RPCHandler::doBookOffers (Json::Value params, LoadType* loadType, Sc return jvResult; if (lpLedger->isImmutable ()) - MasterLockHolder.unlock (); + masterLockHolder.unlock (); if (!params.isMember ("taker_pays") || !params.isMember ("taker_gets") || !params["taker_pays"].isObject () || !params["taker_gets"].isObject ()) return rpcError (rpcINVALID_PARAMS); @@ -1328,9 +1328,9 @@ Json::Value RPCHandler::doBookOffers (Json::Value params, LoadType* loadType, Sc // { // random: // } -Json::Value RPCHandler::doRandom (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doRandom (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { - MasterLockHolder.unlock (); + masterLockHolder.unlock (); uint256 uRandom; try @@ -1349,7 +1349,7 @@ Json::Value RPCHandler::doRandom (Json::Value params, LoadType* loadType, Scoped } } -Json::Value RPCHandler::doPathFind (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doPathFind (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { if (!params.isMember ("subcommand") || !params["subcommand"].isString ()) return rpcError (rpcINVALID_PARAMS); @@ -1404,7 +1404,7 @@ Json::Value RPCHandler::doPathFind (Json::Value params, LoadType* loadType, Scop // - Allows clients to verify path exists. // - Return canonicalized path. // - From a trusted server, allows clients to use path without manipulation. 
-Json::Value RPCHandler::doRipplePathFind (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doRipplePathFind (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { int jc = getApp().getJobQueue ().getJobCountGE (jtCLIENT); @@ -1487,7 +1487,7 @@ Json::Value RPCHandler::doRipplePathFind (Json::Value params, LoadType* loadType *loadType = LT_RPCBurden; Ledger::pointer lSnapShot = boost::make_shared (boost::ref (*lpLedger), false); - MasterLockHolder.unlock (); // As long as we have a locked copy of the ledger, we can unlock. + masterLockHolder.unlock (); // As long as we have a locked copy of the ledger, we can unlock. // Fill in currencies destination will accept Json::Value jvDestCur (Json::arrayValue); @@ -1636,23 +1636,23 @@ Json::Value RPCHandler::doRipplePathFind (Json::Value params, LoadType* loadType // tx_json: , // secret: // } -Json::Value RPCHandler::doSign (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doSign (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { *loadType = LT_RPCBurden; bool bFailHard = params.isMember ("fail_hard") && params["fail_hard"].asBool (); - return transactionSign (params, false, bFailHard, MasterLockHolder); + return transactionSign (params, false, bFailHard, masterLockHolder); } // { // tx_json: , // secret: // } -Json::Value RPCHandler::doSubmit (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doSubmit (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { if (!params.isMember ("tx_blob")) { bool bFailHard = params.isMember ("fail_hard") && params["fail_hard"].asBool (); - return transactionSign (params, true, bFailHard, MasterLockHolder); + return transactionSign (params, true, bFailHard, masterLockHolder); } Json::Value jvResult; @@ -1710,7 +1710,7 @@ Json::Value 
RPCHandler::doSubmit (Json::Value params, LoadType* loadType, Scoped return jvResult; } - MasterLockHolder.unlock (); + masterLockHolder.unlock (); try { @@ -1740,7 +1740,7 @@ Json::Value RPCHandler::doSubmit (Json::Value params, LoadType* loadType, Scoped } } -Json::Value RPCHandler::doConsensusInfo (Json::Value, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doConsensusInfo (Json::Value, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { Json::Value ret (Json::objectValue); @@ -1749,9 +1749,9 @@ Json::Value RPCHandler::doConsensusInfo (Json::Value, LoadType* loadType, Scoped return ret; } -Json::Value RPCHandler::doFetchInfo (Json::Value jvParams, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doFetchInfo (Json::Value jvParams, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { - MasterLockHolder.unlock (); + masterLockHolder.unlock (); Json::Value ret (Json::objectValue); @@ -1766,7 +1766,7 @@ Json::Value RPCHandler::doFetchInfo (Json::Value jvParams, LoadType* loadType, S return ret; } -Json::Value RPCHandler::doServerInfo (Json::Value, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doServerInfo (Json::Value, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { Json::Value ret (Json::objectValue); @@ -1775,7 +1775,7 @@ Json::Value RPCHandler::doServerInfo (Json::Value, LoadType* loadType, ScopedLoc return ret; } -Json::Value RPCHandler::doServerState (Json::Value, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doServerState (Json::Value, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { Json::Value ret (Json::objectValue); @@ -1787,9 +1787,9 @@ Json::Value RPCHandler::doServerState (Json::Value, LoadType* loadType, ScopedLo // { // start: // } -Json::Value RPCHandler::doTxHistory (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value 
RPCHandler::doTxHistory (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { - MasterLockHolder.unlock (); + masterLockHolder.unlock (); if (!params.isMember ("start")) return rpcError (rpcINVALID_PARAMS); @@ -1824,7 +1824,7 @@ Json::Value RPCHandler::doTxHistory (Json::Value params, LoadType* loadType, Sco // { // transaction: // } -Json::Value RPCHandler::doTx (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doTx (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { if (!params.isMember ("transaction")) return rpcError (rpcINVALID_PARAMS); @@ -1890,7 +1890,7 @@ Json::Value RPCHandler::doTx (Json::Value params, LoadType* loadType, ScopedLock return rpcError (rpcNOT_IMPL); } -Json::Value RPCHandler::doLedgerClosed (Json::Value, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doLedgerClosed (Json::Value, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { Json::Value jvResult; @@ -1903,7 +1903,7 @@ Json::Value RPCHandler::doLedgerClosed (Json::Value, LoadType* loadType, ScopedL return jvResult; } -Json::Value RPCHandler::doLedgerCurrent (Json::Value, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doLedgerCurrent (Json::Value, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { Json::Value jvResult; @@ -1917,7 +1917,7 @@ Json::Value RPCHandler::doLedgerCurrent (Json::Value, LoadType* loadType, Scoped // ledger: 'current' | 'closed' | | , // optional // full: true | false // optional, defaults to false. 
// } -Json::Value RPCHandler::doLedger (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doLedger (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { if (!params.isMember ("ledger") && !params.isMember ("ledger_hash") && !params.isMember ("ledger_index")) { @@ -1939,7 +1939,7 @@ Json::Value RPCHandler::doLedger (Json::Value params, LoadType* loadType, Scoped return jvResult; if (lpLedger->isImmutable ()) - MasterLockHolder.unlock (); + masterLockHolder.unlock (); bool bFull = params.isMember ("full") && params["full"].asBool (); bool bTransactions = params.isMember ("transactions") && params["transactions"].asBool (); @@ -1966,7 +1966,7 @@ Json::Value RPCHandler::doLedger (Json::Value params, LoadType* loadType, Scoped // offset: integer, // optional, defaults to 0 // limit: integer // optional // } -Json::Value RPCHandler::doAccountTransactions (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doAccountTransactions (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { RippleAddress raAccount; uint32 offset = params.isMember ("offset") ? params["offset"].asUInt () : 0; @@ -2035,7 +2035,7 @@ Json::Value RPCHandler::doAccountTransactions (Json::Value params, LoadType* loa try { #endif - MasterLockHolder.unlock (); + masterLockHolder.unlock (); Json::Value ret (Json::objectValue); @@ -2111,7 +2111,7 @@ Json::Value RPCHandler::doAccountTransactions (Json::Value params, LoadType* loa // } // // This command requires admin access because it makes no sense to ask an untrusted server for this. 
-Json::Value RPCHandler::doValidationCreate (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doValidationCreate (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { RippleAddress raSeed; Json::Value obj (Json::objectValue); @@ -2137,7 +2137,7 @@ Json::Value RPCHandler::doValidationCreate (Json::Value params, LoadType* loadTy // { // secret: // } -Json::Value RPCHandler::doValidationSeed (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doValidationSeed (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { Json::Value obj (Json::objectValue); @@ -2208,7 +2208,7 @@ Json::Value RPCHandler::accounts (Ledger::ref lrLedger, const RippleAddress& naM // ledger_hash : // ledger_index : // } -Json::Value RPCHandler::doWalletAccounts (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doWalletAccounts (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { Ledger::pointer lpLedger; Json::Value jvResult = lookupLedger (params, lpLedger); @@ -2251,7 +2251,7 @@ Json::Value RPCHandler::doWalletAccounts (Json::Value params, LoadType* loadType } } -Json::Value RPCHandler::doLogRotate (Json::Value, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doLogRotate (Json::Value, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { return Log::rotateLog (); } @@ -2259,9 +2259,9 @@ Json::Value RPCHandler::doLogRotate (Json::Value, LoadType* loadType, ScopedLock // { // passphrase: // } -Json::Value RPCHandler::doWalletPropose (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doWalletPropose (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { - MasterLockHolder.unlock (); + masterLockHolder.unlock (); RippleAddress naSeed; 
RippleAddress naAccount; @@ -2291,7 +2291,7 @@ Json::Value RPCHandler::doWalletPropose (Json::Value params, LoadType* loadType, // { // secret: // } -Json::Value RPCHandler::doWalletSeed (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doWalletSeed (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { RippleAddress raSeed; bool bSecret = params.isMember ("secret"); @@ -2330,7 +2330,7 @@ Json::Value RPCHandler::doWalletSeed (Json::Value params, LoadType* loadType, Sc // username: , // password: // } -Json::Value RPCHandler::doLogin (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doLogin (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { if (!params.isMember ("username") || !params.isMember ("password")) @@ -2368,7 +2368,7 @@ static void textTime (std::string& text, int& seconds, const char* unitName, int text += "s"; } -Json::Value RPCHandler::doFeature (Json::Value params, LoadType* loadType, ScopedLock& mlh) +Json::Value RPCHandler::doFeature (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh) { if (!params.isMember ("feature")) { @@ -2397,7 +2397,7 @@ Json::Value RPCHandler::doFeature (Json::Value params, LoadType* loadType, Scope // { // min_count: // optional, defaults to 10 // } -Json::Value RPCHandler::doGetCounts (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doGetCounts (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { int minCount = 10; @@ -2449,7 +2449,7 @@ Json::Value RPCHandler::doGetCounts (Json::Value params, LoadType* loadType, Sco return ret; } -Json::Value RPCHandler::doLogLevel (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doLogLevel (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { 
// log_level if (!params.isMember ("severity")) @@ -2502,7 +2502,7 @@ Json::Value RPCHandler::doLogLevel (Json::Value params, LoadType* loadType, Scop // node: |, // comment: // optional // } -Json::Value RPCHandler::doUnlAdd (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doUnlAdd (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { std::string strNode = params.isMember ("node") ? params["node"].asString () : ""; std::string strComment = params.isMember ("comment") ? params["comment"].asString () : ""; @@ -2526,7 +2526,7 @@ Json::Value RPCHandler::doUnlAdd (Json::Value params, LoadType* loadType, Scoped // { // node: | // } -Json::Value RPCHandler::doUnlDelete (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doUnlDelete (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { if (!params.isMember ("node")) return rpcError (rpcINVALID_PARAMS); @@ -2549,7 +2549,7 @@ Json::Value RPCHandler::doUnlDelete (Json::Value params, LoadType* loadType, Sco } } -Json::Value RPCHandler::doUnlList (Json::Value, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doUnlList (Json::Value, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { Json::Value obj (Json::objectValue); @@ -2559,7 +2559,7 @@ Json::Value RPCHandler::doUnlList (Json::Value, LoadType* loadType, ScopedLock& } // Populate the UNL from a local validators.txt file. 
-Json::Value RPCHandler::doUnlLoad (Json::Value, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doUnlLoad (Json::Value, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { if (theConfig.VALIDATORS_FILE.empty () || !getApp().getUNL ().nodeLoad (theConfig.VALIDATORS_FILE)) { @@ -2571,7 +2571,7 @@ Json::Value RPCHandler::doUnlLoad (Json::Value, LoadType* loadType, ScopedLock& // Populate the UNL from ripple.com's validators.txt file. -Json::Value RPCHandler::doUnlNetwork (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doUnlNetwork (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { getApp().getUNL ().nodeNetwork (); @@ -2579,7 +2579,7 @@ Json::Value RPCHandler::doUnlNetwork (Json::Value params, LoadType* loadType, Sc } // unl_reset -Json::Value RPCHandler::doUnlReset (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doUnlReset (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { getApp().getUNL ().nodeReset (); @@ -2587,14 +2587,14 @@ Json::Value RPCHandler::doUnlReset (Json::Value params, LoadType* loadType, Scop } // unl_score -Json::Value RPCHandler::doUnlScore (Json::Value, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doUnlScore (Json::Value, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { getApp().getUNL ().nodeScore (); return "scoring requested"; } -Json::Value RPCHandler::doSMS (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doSMS (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { if (!params.isMember ("text")) return rpcError (rpcINVALID_PARAMS); @@ -2603,14 +2603,14 @@ Json::Value RPCHandler::doSMS (Json::Value params, LoadType* loadType, ScopedLoc return "sms dispatched"; } -Json::Value RPCHandler::doStop 
(Json::Value, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doStop (Json::Value, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { getApp().stop (); return SYSTEM_NAME " server stopping"; } -Json::Value RPCHandler::doLedgerAccept (Json::Value, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doLedgerAccept (Json::Value, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { Json::Value jvResult; @@ -2633,7 +2633,7 @@ Json::Value RPCHandler::doLedgerAccept (Json::Value, LoadType* loadType, ScopedL // ledger_index : // } // XXX In this case, not specify either ledger does not mean ledger current. It means any ledger. -Json::Value RPCHandler::doTransactionEntry (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doTransactionEntry (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { Ledger::pointer lpLedger; Json::Value jvResult = lookupLedger (params, lpLedger); @@ -2802,7 +2802,7 @@ Json::Value RPCHandler::lookupLedger (Json::Value params, Ledger::pointer& lpLed // ledger_index : // ... 
// } -Json::Value RPCHandler::doLedgerEntry (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doLedgerEntry (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { Ledger::pointer lpLedger; Json::Value jvResult = lookupLedger (params, lpLedger); @@ -2811,7 +2811,7 @@ Json::Value RPCHandler::doLedgerEntry (Json::Value params, LoadType* loadType, S return jvResult; if (lpLedger->isImmutable ()) - MasterLockHolder.unlock (); + masterLockHolder.unlock (); uint256 uNodeIndex; bool bNodeBinary = false; @@ -3015,7 +3015,7 @@ Json::Value RPCHandler::doLedgerEntry (Json::Value params, LoadType* loadType, S // ledger_hash : // ledger_index : // } -Json::Value RPCHandler::doLedgerHeader (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doLedgerHeader (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { Ledger::pointer lpLedger; Json::Value jvResult = lookupLedger (params, lpLedger); @@ -3057,7 +3057,7 @@ boost::unordered_set RPCHandler::parseAccountIds (const Json::Val return usnaResult; } -Json::Value RPCHandler::doSubscribe (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doSubscribe (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { InfoSub::pointer ispSub; Json::Value jvResult (Json::objectValue); @@ -3349,7 +3349,7 @@ Json::Value RPCHandler::doSubscribe (Json::Value params, LoadType* loadType, Sco } // FIXME: This leaks RPCSub objects for JSON-RPC. Shouldn't matter for anyone sane. 
-Json::Value RPCHandler::doUnsubscribe (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doUnsubscribe (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { InfoSub::pointer ispSub; Json::Value jvResult (Json::objectValue); @@ -3571,7 +3571,7 @@ Json::Value RPCHandler::doRpcCommand (const std::string& strMethod, Json::Value return jvResult; } -Json::Value RPCHandler::doInternal (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder) +Json::Value RPCHandler::doInternal (Json::Value params, LoadType* loadType, Application::ScopedLockType& masterLockHolder) { // Used for debug or special-purpose RPC commands if (!params.isMember ("internal_command")) @@ -3694,53 +3694,55 @@ Json::Value RPCHandler::doCommand (const Json::Value& params, int iRole, LoadTyp return rpcError (rpcNO_PERMISSION); } - ScopedLock MasterLockHolder (getApp().getMasterLock ()); + { + Application::ScopedLockType lock (getApp().getMasterLock (), __FILE__, __LINE__); - if ((commandsA[i].iOptions & optNetwork) && (mNetOps->getOperatingMode () < NetworkOPs::omSYNCING)) - { - WriteLog (lsINFO, RPCHandler) << "Insufficient network mode for RPC: " << mNetOps->strOperatingMode (); - - return rpcError (rpcNO_NETWORK); - } - - if (!theConfig.RUN_STANDALONE && (commandsA[i].iOptions & optCurrent) && (getApp().getLedgerMaster().getValidatedLedgerAge() > 120)) - { - return rpcError (rpcNO_CURRENT); - } - else if ((commandsA[i].iOptions & optClosed) && !mNetOps->getClosedLedger ()) - { - return rpcError (rpcNO_CLOSED); - } - else - { - try + if ((commandsA[i].iOptions & optNetwork) && (mNetOps->getOperatingMode () < NetworkOPs::omSYNCING)) { - Json::Value jvRaw = (this->* (commandsA[i].dfpFunc)) (params, loadType, MasterLockHolder); + WriteLog (lsINFO, RPCHandler) << "Insufficient network mode for RPC: " << mNetOps->strOperatingMode (); - // Regularize result. - if (jvRaw.isObject ()) - { - // Got an object. 
- return jvRaw; - } - else - { - // Probably got a string. - Json::Value jvResult (Json::objectValue); - - jvResult["message"] = jvRaw; - - return jvResult; - } + return rpcError (rpcNO_NETWORK); } - catch (std::exception& e) + + if (!theConfig.RUN_STANDALONE && (commandsA[i].iOptions & optCurrent) && (getApp().getLedgerMaster().getValidatedLedgerAge() > 120)) { - WriteLog (lsINFO, RPCHandler) << "Caught throw: " << e.what (); + return rpcError (rpcNO_CURRENT); + } + else if ((commandsA[i].iOptions & optClosed) && !mNetOps->getClosedLedger ()) + { + return rpcError (rpcNO_CLOSED); + } + else + { + try + { + Json::Value jvRaw = (this->* (commandsA[i].dfpFunc)) (params, loadType, lock); - if (*loadType == LT_RPCReference) - *loadType = LT_RPCException; + // Regularize result. + if (jvRaw.isObject ()) + { + // Got an object. + return jvRaw; + } + else + { + // Probably got a string. + Json::Value jvResult (Json::objectValue); - return rpcError (rpcINTERNAL); + jvResult["message"] = jvRaw; + + return jvResult; + } + } + catch (std::exception& e) + { + WriteLog (lsINFO, RPCHandler) << "Caught throw: " << e.what (); + + if (*loadType == LT_RPCReference) + *loadType = LT_RPCException; + + return rpcError (rpcINTERNAL); + } } } } diff --git a/src/cpp/ripple/RPCHandler.h b/src/cpp/ripple/RPCHandler.h index 59d8a59d03..ea39664b1b 100644 --- a/src/cpp/ripple/RPCHandler.h +++ b/src/cpp/ripple/RPCHandler.h @@ -40,7 +40,7 @@ private: typedef Json::Value (RPCHandler::*doFuncPtr) ( Json::Value params, LoadType* loadType, - ScopedLock& MasterLockHolder); + Application::ScopedLockType& MasterLockHolder); // VFALCO TODO Document these and give the enumeration a label. 
enum @@ -57,7 +57,7 @@ private: boost::unordered_set parseAccountIds (const Json::Value& jvArray); - Json::Value transactionSign (Json::Value jvRequest, bool bSubmit, bool bFailHard, ScopedLock& mlh); + Json::Value transactionSign (Json::Value jvRequest, bool bSubmit, bool bFailHard, Application::ScopedLockType& mlh); Json::Value lookupLedger (Json::Value jvRequest, Ledger::pointer& lpLedger); @@ -89,71 +89,71 @@ private: const int iIndex, const bool bStrict); - Json::Value doAccountInfo (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doAccountLines (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doAccountOffers (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doAccountTransactions (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doBookOffers (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doConnect (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doConsensusInfo (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doFeature (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doFetchInfo (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doGetCounts (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doInternal (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doLedger (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doLedgerAccept (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doLedgerClosed (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doLedgerCurrent (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doLedgerEntry (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doLedgerHeader (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doLogLevel (Json::Value params, LoadType* 
loadType, ScopedLock& mlh); - Json::Value doLogRotate (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doNicknameInfo (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doOwnerInfo (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doPathFind (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doPeers (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doPing (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doProfile (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doProofCreate (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doProofSolve (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doProofVerify (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doRandom (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doRipplePathFind (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doSMS (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doServerInfo (Json::Value params, LoadType* loadType, ScopedLock& mlh); // for humans - Json::Value doServerState (Json::Value params, LoadType* loadType, ScopedLock& mlh); // for machines - Json::Value doSessionClose (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doSessionOpen (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doSign (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doStop (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doSubmit (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doSubscribe (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doTransactionEntry (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doTx (Json::Value params, LoadType* loadType, ScopedLock& mlh); 
- Json::Value doTxHistory (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doUnlAdd (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doUnlDelete (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doUnlFetch (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doUnlList (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doUnlLoad (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doUnlNetwork (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doUnlReset (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doUnlScore (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doUnsubscribe (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doValidationCreate (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doValidationSeed (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doWalletAccounts (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doWalletLock (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doWalletPropose (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doWalletSeed (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doWalletUnlock (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doWalletVerify (Json::Value params, LoadType* loadType, ScopedLock& mlh); + Json::Value doAccountInfo (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doAccountLines (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doAccountOffers (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doAccountTransactions (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value 
doBookOffers (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doConnect (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doConsensusInfo (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doFeature (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doFetchInfo (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doGetCounts (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doInternal (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doLedger (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doLedgerAccept (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doLedgerClosed (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doLedgerCurrent (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doLedgerEntry (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doLedgerHeader (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doLogLevel (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doLogRotate (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doNicknameInfo (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doOwnerInfo (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doPathFind (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doPeers (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doPing (Json::Value params, LoadType* loadType, 
Application::ScopedLockType& mlh); + Json::Value doProfile (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doProofCreate (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doProofSolve (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doProofVerify (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doRandom (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doRipplePathFind (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doSMS (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doServerInfo (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); // for humans + Json::Value doServerState (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); // for machines + Json::Value doSessionClose (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doSessionOpen (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doSign (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doStop (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doSubmit (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doSubscribe (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doTransactionEntry (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doTx (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doTxHistory (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doUnlAdd (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + 
Json::Value doUnlDelete (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doUnlFetch (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doUnlList (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doUnlLoad (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doUnlNetwork (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doUnlReset (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doUnlScore (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doUnsubscribe (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doValidationCreate (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doValidationSeed (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doWalletAccounts (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doWalletLock (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doWalletPropose (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doWalletSeed (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doWalletUnlock (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doWalletVerify (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); #if ENABLE_INSECURE - Json::Value doDataDelete (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doDataFetch (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doDataStore (Json::Value params, LoadType* loadType, ScopedLock& mlh); - Json::Value doLogin (Json::Value params, LoadType* loadType, 
ScopedLock& mlh); + Json::Value doDataDelete (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doDataFetch (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doDataStore (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); + Json::Value doLogin (Json::Value params, LoadType* loadType, Application::ScopedLockType& mlh); #endif private: diff --git a/src/cpp/ripple/ripple_Application.cpp b/src/cpp/ripple/ripple_Application.cpp index 8c87ba70bf..a4e37d3e0f 100644 --- a/src/cpp/ripple/ripple_Application.cpp +++ b/src/cpp/ripple/ripple_Application.cpp @@ -16,6 +16,7 @@ class ApplicationImp : public Application , public SharedSingleton , public Validators::Listener + , public NodeStore::Scheduler , LeakChecked { public: @@ -43,17 +44,17 @@ public: #endif , mIOService ((theConfig.NODE_SIZE >= 2) ? 2 : 1) , mIOWork (mIOService) - , mNetOps (&mLedgerMaster) - , m_rpcServerHandler (mNetOps) + , mNetOps (new NetworkOPs (&mLedgerMaster)) + , m_rpcServerHandler (*mNetOps) , mTempNodeCache ("NodeCache", 16384, 90) - , m_nodeStore ( - theConfig.NODE_DB, - theConfig.FASTNODE_DB, - 16384, 300) , mSLECache ("LedgerEntryCache", 4096, 120) , mSNTPClient (mAuxService) , mJobQueue (mIOService) // VFALCO New stuff + , m_nodeStore (NodeStore::New ( + theConfig.nodeDatabase, + theConfig.ephemeralNodeDatabase, + *this)) , m_validators (Validators::New (this)) , mFeatures (IFeatures::New (2 * 7 * 24 * 60 * 60, 200)) // two weeks, 200/256 , mFeeVote (IFeeVote::New (10, 50 * SYSTEM_CURRENCY_PARTS, 12.5 * SYSTEM_CURRENCY_PARTS)) @@ -70,11 +71,6 @@ public: , mTxnDB (NULL) , mLedgerDB (NULL) , mWalletDB (NULL) // VFALCO NOTE are all these 'NULL' ctor params necessary? 
- , mNetNodeDB (NULL) - , mPathFindDB (NULL) - , mHashNodeDB (NULL) - , mHashNodeLDB (NULL) - , mEphemeralLDB (NULL) , mPeerDoor (NULL) , mRPCDoor (NULL) , mWSPublicDoor (NULL) @@ -88,89 +84,101 @@ public: ~ApplicationImp () { + mNetOps = nullptr; + // VFALCO TODO Wrap these in ScopedPointer delete mTxnDB; delete mLedgerDB; delete mWalletDB; - delete mHashNodeDB; - delete mNetNodeDB; - delete mPathFindDB; - delete mHashNodeLDB; - - if (mEphemeralLDB != nullptr) - delete mEphemeralLDB; } + //-------------------------------------------------------------------------- + + static void callScheduledTask (NodeStore::Scheduler::Task* task, Job&) + { + task->performScheduledTask (); + } + + void scheduleTask (NodeStore::Scheduler::Task* task) + { + getJobQueue ().addJob ( + jtWRITE, + "NodeObject::store", + BIND_TYPE (&ApplicationImp::callScheduledTask, task, P_1)); + } + + //-------------------------------------------------------------------------- + LocalCredentials& getLocalCredentials () { return m_localCredentials ; } - + NetworkOPs& getOPs () { - return mNetOps; + return *mNetOps; } boost::asio::io_service& getIOService () { return mIOService; } - + LedgerMaster& getLedgerMaster () { return mLedgerMaster; } - + InboundLedgers& getInboundLedgers () { return m_inboundLedgers; } - + TransactionMaster& getMasterTransaction () { return mMasterTransaction; } - + NodeCache& getTempNodeCache () { return mTempNodeCache; } - + NodeStore& getNodeStore () { - return m_nodeStore; + return *m_nodeStore; } - + JobQueue& getJobQueue () { return mJobQueue; } - - boost::recursive_mutex& getMasterLock () + + MasterLockType& getMasterLock () { return mMasterLock; } - + ILoadManager& getLoadManager () { return *m_loadManager; } - + TXQueue& getTxnQueue () { return mTxnQueue; } - + PeerDoor& getPeerDoor () { return *mPeerDoor; } - + OrderBookDB& getOrderBookDB () { return mOrderBookDB; } - + SLECache& getSLECache () { return mSLECache; @@ -185,37 +193,37 @@ public: { return *mFeatures; } - 
+ ILoadFeeTrack& getFeeTrack () { return *mFeeTrack; } - + IFeeVote& getFeeVote () { return *mFeeVote; } - + IHashRouter& getHashRouter () { return *mHashRouter; } - + IValidations& getValidations () { return *mValidations; } - + UniqueNodeList& getUNL () { return *mUNL; } - + IProofOfWorkFactory& getProofOfWorkFactory () { return *mProofOfWorkFactory; } - + IPeers& getPeers () { return *mPeers; @@ -247,27 +255,6 @@ public: { return mWalletDB; } - DatabaseCon* getNetNodeDB () - { - return mNetNodeDB; - } - DatabaseCon* getPathFindDB () - { - return mPathFindDB; - } - DatabaseCon* getHashNodeDB () - { - return mHashNodeDB; - } - - leveldb::DB* getHashNodeLDB () - { - return mHashNodeLDB; - } - leveldb::DB* getEphemeralLDB () - { - return mEphemeralLDB; - } bool isShutdown () { @@ -293,16 +280,15 @@ private: // boost::asio::io_service::work mIOWork; - boost::recursive_mutex mMasterLock; + MasterLockType mMasterLock; LocalCredentials m_localCredentials; LedgerMaster mLedgerMaster; InboundLedgers m_inboundLedgers; TransactionMaster mMasterTransaction; - NetworkOPs mNetOps; + ScopedPointer mNetOps; RPCServerHandler m_rpcServerHandler; NodeCache mTempNodeCache; - NodeStore m_nodeStore; SLECache mSLECache; SNTPClient mSNTPClient; JobQueue mJobQueue; @@ -310,29 +296,23 @@ private: OrderBookDB mOrderBookDB; // VFALCO Clean stuff - beast::ScopedPointer m_validators; - beast::ScopedPointer mFeatures; - beast::ScopedPointer mFeeVote; - beast::ScopedPointer mFeeTrack; - beast::ScopedPointer mHashRouter; - beast::ScopedPointer mValidations; - beast::ScopedPointer mUNL; - beast::ScopedPointer mProofOfWorkFactory; - beast::ScopedPointer mPeers; - beast::ScopedPointer m_loadManager; + ScopedPointer m_nodeStore; + ScopedPointer m_validators; + ScopedPointer mFeatures; + ScopedPointer mFeeVote; + ScopedPointer mFeeTrack; + ScopedPointer mHashRouter; + ScopedPointer mValidations; + ScopedPointer mUNL; + ScopedPointer mProofOfWorkFactory; + ScopedPointer mPeers; + ScopedPointer 
m_loadManager; // VFALCO End Clean stuff DatabaseCon* mRpcDB; DatabaseCon* mTxnDB; DatabaseCon* mLedgerDB; DatabaseCon* mWalletDB; - DatabaseCon* mNetNodeDB; - DatabaseCon* mPathFindDB; - DatabaseCon* mHashNodeDB; - - // VFALCO TODO Wrap this in an interface - leveldb::DB* mHashNodeLDB; - leveldb::DB* mEphemeralLDB; ScopedPointer mPeerDoor; ScopedPointer mRPCDoor; @@ -353,19 +333,11 @@ void ApplicationImp::stop () StopSustain (); mShutdown = true; mIOService.stop (); - // VFALCO TODO We shouldn't have to explicitly call this function. - // The NodeStore destructor should take care of it. - m_nodeStore.waitWrite (); + m_nodeStore = nullptr; mValidations->flush (); mAuxService.stop (); mJobQueue.shutdown (); - delete mHashNodeLDB; - mHashNodeLDB = NULL; - - delete mEphemeralLDB; - mEphemeralLDB = NULL; - WriteLog (lsINFO, Application) << "Stopped: " << mIOService.stopped (); mShutdown = false; } @@ -427,7 +399,7 @@ void ApplicationImp::setup () if (!theConfig.DEBUG_LOGFILE.empty ()) { - // Let BEAST_DEBUG messages go to the file but only WARNING or higher to regular output (unless verbose) + // Let debug messages go to the file but only WARNING or higher to regular output (unless verbose) Log::setLogFile (theConfig.DEBUG_LOGFILE); if (Log::getMinSeverity () > lsDEBUG) @@ -445,16 +417,11 @@ void ApplicationImp::setup () boost::thread t1 (BIND_TYPE (&InitDB, &mRpcDB, "rpc.db", RpcDBInit, RpcDBCount)); boost::thread t2 (BIND_TYPE (&InitDB, &mTxnDB, "transaction.db", TxnDBInit, TxnDBCount)); boost::thread t3 (BIND_TYPE (&InitDB, &mLedgerDB, "ledger.db", LedgerDBInit, LedgerDBCount)); + boost::thread t4 (BIND_TYPE (&InitDB, &mWalletDB, "wallet.db", WalletDBInit, WalletDBCount)); t1.join (); t2.join (); t3.join (); - - boost::thread t4 (BIND_TYPE (&InitDB, &mWalletDB, "wallet.db", WalletDBInit, WalletDBCount)); - boost::thread t6 (BIND_TYPE (&InitDB, &mNetNodeDB, "netnode.db", NetNodeDBInit, NetNodeDBCount)); - boost::thread t7 (BIND_TYPE (&InitDB, &mPathFindDB, 
"pathfind.db", PathFindDBInit, PathFindDBCount)); t4.join (); - t6.join (); - t7.join (); leveldb::Options options; options.create_if_missing = true; @@ -493,7 +460,7 @@ void ApplicationImp::setup () { // This should probably become the default once we have a stable network if (!theConfig.RUN_STANDALONE) - mNetOps.needNetworkLedger (); + mNetOps->needNetworkLedger (); startNewLedger (); } @@ -515,7 +482,7 @@ void ApplicationImp::setup () getUNL ().nodeBootstrap (); mValidations->tune (theConfig.getSize (siValidationsSize), theConfig.getSize (siValidationsAge)); - m_nodeStore.tune (theConfig.getSize (siNodeCacheSize), theConfig.getSize (siNodeCacheAge)); + m_nodeStore->tune (theConfig.getSize (siNodeCacheSize), theConfig.getSize (siNodeCacheAge)); mLedgerMaster.tune (theConfig.getSize (siLedgerSize), theConfig.getSize (siLedgerAge)); mSLECache.setTargetSize (theConfig.getSize (siSLECacheSize)); mSLECache.setTargetAge (theConfig.getSize (siSLECacheAge)); @@ -624,13 +591,13 @@ void ApplicationImp::setup () { WriteLog (lsWARNING, Application) << "Running in standalone mode"; - mNetOps.setStandAlone (); + mNetOps->setStandAlone (); } else { // VFALCO NOTE the state timer resets the deadlock detector. // - mNetOps.setStateTimer (); + mNetOps->setStateTimer (); } } @@ -646,7 +613,7 @@ void ApplicationImp::run () // VFALCO NOTE This seems unnecessary. If we properly refactor the load // manager then the deadlock detector can just always be "armed" // - getApp().getLoadManager ().activateDeadlockDetector (); + getApp().getLoadManager ().activateDeadlockDetector (); } mIOService.run (); // This blocks @@ -697,7 +664,7 @@ void ApplicationImp::doSweep(Job& j) // mMasterTransaction.sweep (); - m_nodeStore.sweep (); + m_nodeStore->sweep (); mLedgerMaster.sweep (); mTempNodeCache.sweep (); mValidations->sweep (); @@ -705,8 +672,8 @@ void ApplicationImp::doSweep(Job& j) mSLECache.sweep (); AcceptedLedger::sweep (); // VFALCO NOTE AcceptedLedger is/has a singleton? 
SHAMap::sweep (); // VFALCO NOTE SHAMap is/has a singleton? - mNetOps.sweepFetchPack (); - + mNetOps->sweepFetchPack (); + // VFALCO NOTE does the call to sweep() happen on another thread? mSweepTimer.expires_from_now (boost::posix_time::seconds (theConfig.getSize (siSweepInterval))); mSweepTimer.async_wait (BIND_TYPE (&ApplicationImp::sweep, this)); } @@ -737,7 +704,7 @@ void ApplicationImp::startNewLedger () secondLedger->setAccepted (); mLedgerMaster.pushLedger (secondLedger, boost::make_shared (true, boost::ref (*secondLedger)), false); assert (!!secondLedger->getAccountState (rootAddress)); - mNetOps.setLastCloseTime (secondLedger->getCloseTimeNC ()); + mNetOps->setLastCloseTime (secondLedger->getCloseTimeNC ()); } } @@ -805,7 +772,7 @@ bool ApplicationImp::loadOldLedger (const std::string& l, bool bReplay) Ledger::pointer openLedger = boost::make_shared (false, boost::ref (*loadLedger)); mLedgerMaster.switchLedgers (loadLedger, openLedger); mLedgerMaster.forceValid(loadLedger); - mNetOps.setLastCloseTime (loadLedger->getCloseTimeNC ()); + mNetOps->setLastCloseTime (loadLedger->getCloseTimeNC ()); if (bReplay) { // inject transaction from replayLedger into consensus set @@ -988,15 +955,9 @@ static void addTxnSeqField () void ApplicationImp::updateTables () { - if (theConfig.NODE_DB.empty ()) + if (theConfig.nodeDatabase.size () <= 0) { - Log (lsFATAL) << "The NODE_DB configuration setting MUST be set"; - StopSustain (); - exit (1); - } - else if (theConfig.NODE_DB == "LevelDB" || theConfig.NODE_DB == "SQLite") - { - Log (lsFATAL) << "The NODE_DB setting has been updated, your value is out of date"; + Log (lsFATAL) << "The [node_db] configuration setting has been updated and must be set"; StopSustain (); exit (1); } @@ -1013,8 +974,16 @@ void ApplicationImp::updateTables () exit (1); } - if (!theConfig.DB_IMPORT.empty()) - getApp().getNodeStore().import(theConfig.DB_IMPORT); + if (theConfig.importNodeDatabase.size () > 0) + { + ScopedPointer source 
(NodeStore::New (theConfig.importNodeDatabase)); + + WriteLog (lsWARNING, NodeObject) << + "Node import from '" << source->getName () << "' to '" + << getApp().getNodeStore().getName () << "'."; + + getApp().getNodeStore().import (*source); + } } //------------------------------------------------------------------------------ diff --git a/src/cpp/ripple/ripple_Application.h b/src/cpp/ripple/ripple_Application.h index 6917442b9e..59a2f62a2b 100644 --- a/src/cpp/ripple/ripple_Application.h +++ b/src/cpp/ripple/ripple_Application.h @@ -38,8 +38,6 @@ typedef TaggedCache SLECach class Application { public: - virtual ~Application () { } - /* VFALCO NOTE The master lock protects: @@ -51,7 +49,110 @@ public: other things */ - virtual boost::recursive_mutex& getMasterLock () = 0; +#if 1 + class ScopedLockType; + + class MasterLockType + { + public: + MasterLockType () + : m_fileName ("") + , m_lineNumber (0) + { + } + + // Note that these are not exactly thread safe. + + char const* getFileName () const noexcept + { + return m_fileName.get (); + } + + int getLineNumber () const noexcept + { + return m_lineNumber.get (); + } + + private: + friend class ScopedLockType; + + void setOwner (char const* fileName, int lineNumber) + { + m_fileName.set (fileName); + m_lineNumber.set (lineNumber); + } + + void resetOwner () + { + m_fileName.set (""); + m_lineNumber.set (0); + } + + boost::recursive_mutex m_mutex; + Atomic m_fileName; + Atomic m_lineNumber; + }; + + class ScopedLockType + { + public: + explicit ScopedLockType (MasterLockType& mutex, + char const* fileName, + int lineNumber) + : m_mutex (mutex) + , m_lock (mutex.m_mutex) + { + mutex.setOwner (fileName, lineNumber); + } + + ~ScopedLockType () + { + if (m_lock.owns_lock ()) + m_mutex.resetOwner (); + } + + void unlock () + { + if (m_lock.owns_lock ()) + m_mutex.resetOwner (); + + m_lock.unlock (); + } + + private: + MasterLockType& m_mutex; + boost::recursive_mutex::scoped_lock m_lock; + }; + +#else + typedef 
boost::recursive_mutex MasterLockType; + + typedef boost::recursive_mutex::scoped_lock ScopedLockType; + +#endif + + virtual MasterLockType& getMasterLock () = 0; + + + + +public: + struct State + { + // Stuff in here is accessed concurrently and requires a WriteAccess + }; + + typedef SharedData SharedState; + + SharedState& getSharedState () noexcept { return m_sharedState; } + + SharedState const& getSharedState () const noexcept { return m_sharedState; } + +private: + SharedState m_sharedState; + +public: + virtual ~Application () { } virtual boost::asio::io_service& getIOService () = 0; @@ -89,15 +190,9 @@ public: It looks like this is used to store the unique node list. */ // VFALCO TODO Rename, document this + // NOTE This will be replaced by class Validators + // virtual DatabaseCon* getWalletDB () = 0; - // VFALCO NOTE It looks like this isn't used... - //virtual DatabaseCon* getNetNodeDB () = 0; - // VFALCO NOTE It looks like this isn't used... - //virtual DatabaseCon* getPathFindDB () = 0; - virtual DatabaseCon* getHashNodeDB () = 0; - - virtual leveldb::DB* getHashNodeLDB () = 0; - virtual leveldb::DB* getEphemeralLDB () = 0; virtual bool getSystemTimeOffset (int& offset) = 0; virtual bool isShutdown () = 0; diff --git a/src/cpp/ripple/ripple_LedgerConsensus.cpp b/src/cpp/ripple/ripple_LedgerConsensus.cpp index 704ae9c4f7..d400d68766 100644 --- a/src/cpp/ripple/ripple_LedgerConsensus.cpp +++ b/src/cpp/ripple/ripple_LedgerConsensus.cpp @@ -1179,155 +1179,157 @@ void LedgerConsensus::accept (SHAMap::ref set, LoadEvent::pointer) if (set->getHash ().isNonZero ()) // put our set where others can get it later getApp().getOPs ().takePosition (mPreviousLedger->getLedgerSeq (), set); - boost::recursive_mutex::scoped_lock masterLock (getApp().getMasterLock ()); - assert (set->getHash () == mOurPosition->getCurrentHash ()); - - getApp().getOPs ().peekStoredProposals ().clear (); // these are now obsolete - - uint32 closeTime = roundCloseTime 
(mOurPosition->getCloseTime ()); - bool closeTimeCorrect = true; - - if (closeTime == 0) { - // we agreed to disagree - closeTimeCorrect = false; - closeTime = mPreviousLedger->getCloseTimeNC () + 1; - } + Application::ScopedLockType lock (getApp ().getMasterLock (), __FILE__, __LINE__); - WriteLog (lsDEBUG, LedgerConsensus) << "Report: Prop=" << (mProposing ? "yes" : "no") << " val=" << (mValidating ? "yes" : "no") << - " corLCL=" << (mHaveCorrectLCL ? "yes" : "no") << " fail=" << (mConsensusFail ? "yes" : "no"); - WriteLog (lsDEBUG, LedgerConsensus) << "Report: Prev = " << mPrevLedgerHash << ":" << mPreviousLedger->getLedgerSeq (); - WriteLog (lsDEBUG, LedgerConsensus) << "Report: TxSt = " << set->getHash () << ", close " << closeTime << (closeTimeCorrect ? "" : "X"); + assert (set->getHash () == mOurPosition->getCurrentHash ()); - CanonicalTXSet failedTransactions (set->getHash ()); + getApp().getOPs ().peekStoredProposals ().clear (); // these are now obsolete - Ledger::pointer newLCL = boost::make_shared (false, boost::ref (*mPreviousLedger)); + uint32 closeTime = roundCloseTime (mOurPosition->getCloseTime ()); + bool closeTimeCorrect = true; - newLCL->peekTransactionMap ()->armDirty (); - newLCL->peekAccountStateMap ()->armDirty (); - WriteLog (lsDEBUG, LedgerConsensus) << "Applying consensus set transactions to the last closed ledger"; - applyTransactions (set, newLCL, newLCL, failedTransactions, false); - newLCL->updateSkipList (); - newLCL->setClosed (); - boost::shared_ptr acctNodes = newLCL->peekAccountStateMap ()->disarmDirty (); - boost::shared_ptr txnNodes = newLCL->peekTransactionMap ()->disarmDirty (); - - // write out dirty nodes (temporarily done here) Most come before setAccepted - int fc; - - while ((fc = SHAMap::flushDirty (*acctNodes, 256, hotACCOUNT_NODE, newLCL->getLedgerSeq ())) > 0) - { - WriteLog (lsTRACE, LedgerConsensus) << "Flushed " << fc << " dirty state nodes"; - } - - while ((fc = SHAMap::flushDirty (*txnNodes, 256, 
hotTRANSACTION_NODE, newLCL->getLedgerSeq ())) > 0) - { - WriteLog (lsTRACE, LedgerConsensus) << "Flushed " << fc << " dirty transaction nodes"; - } - - newLCL->setAccepted (closeTime, mCloseResolution, closeTimeCorrect); - newLCL->updateHash (); - newLCL->setImmutable (); - - WriteLog (lsDEBUG, LedgerConsensus) << "Report: NewL = " << newLCL->getHash () << ":" << newLCL->getLedgerSeq (); - uint256 newLCLHash = newLCL->getHash (); - - if (ShouldLog (lsTRACE, LedgerConsensus)) - { - WriteLog (lsTRACE, LedgerConsensus) << "newLCL"; - Json::Value p; - newLCL->addJson (p, LEDGER_JSON_DUMP_TXRP | LEDGER_JSON_DUMP_STATE); - WriteLog (lsTRACE, LedgerConsensus) << p; - } - - statusChange (protocol::neACCEPTED_LEDGER, *newLCL); - - if (mValidating && !mConsensusFail) - { - uint256 signingHash; - SerializedValidation::pointer v = boost::make_shared - (newLCLHash, getApp().getOPs ().getValidationTimeNC (), mValPublic, mProposing); - v->setFieldU32 (sfLedgerSequence, newLCL->getLedgerSeq ()); - addLoad(v); - - if (((newLCL->getLedgerSeq () + 1) % 256) == 0) // next ledger is flag ledger + if (closeTime == 0) { - getApp().getFeeVote ().doValidation (newLCL, *v); - getApp().getFeatureTable ().doValidation (newLCL, *v); + // we agreed to disagree + closeTimeCorrect = false; + closeTime = mPreviousLedger->getCloseTimeNC () + 1; } - v->sign (signingHash, mValPrivate); - v->setTrusted (); - getApp().getHashRouter ().addSuppression (signingHash); // suppress it if we receive it - getApp().getValidations ().addValidation (v, "local"); - getApp().getOPs ().setLastValidation (v); - Blob validation = v->getSigned (); - protocol::TMValidation val; - val.set_validation (&validation[0], validation.size ()); - int j = getApp().getPeers ().relayMessage (NULL, - boost::make_shared (val, protocol::mtVALIDATION)); - WriteLog (lsINFO, LedgerConsensus) << "CNF Val " << newLCLHash << " to " << j << " peers"; - } - else - WriteLog (lsINFO, LedgerConsensus) << "CNF newLCL " << newLCLHash; + WriteLog 
(lsDEBUG, LedgerConsensus) << "Report: Prop=" << (mProposing ? "yes" : "no") << " val=" << (mValidating ? "yes" : "no") << + " corLCL=" << (mHaveCorrectLCL ? "yes" : "no") << " fail=" << (mConsensusFail ? "yes" : "no"); + WriteLog (lsDEBUG, LedgerConsensus) << "Report: Prev = " << mPrevLedgerHash << ":" << mPreviousLedger->getLedgerSeq (); + WriteLog (lsDEBUG, LedgerConsensus) << "Report: TxSt = " << set->getHash () << ", close " << closeTime << (closeTimeCorrect ? "" : "X"); - Ledger::pointer newOL = boost::make_shared (true, boost::ref (*newLCL)); - ScopedLock sl ( getApp().getLedgerMaster ().getLock ()); + CanonicalTXSet failedTransactions (set->getHash ()); - // Apply disputed transactions that didn't get in - TransactionEngine engine (newOL); - BOOST_FOREACH (u256_lct_pair & it, mDisputes) - { - if (!it.second->getOurVote ()) + Ledger::pointer newLCL = boost::make_shared (false, boost::ref (*mPreviousLedger)); + + newLCL->peekTransactionMap ()->armDirty (); + newLCL->peekAccountStateMap ()->armDirty (); + WriteLog (lsDEBUG, LedgerConsensus) << "Applying consensus set transactions to the last closed ledger"; + applyTransactions (set, newLCL, newLCL, failedTransactions, false); + newLCL->updateSkipList (); + newLCL->setClosed (); + boost::shared_ptr acctNodes = newLCL->peekAccountStateMap ()->disarmDirty (); + boost::shared_ptr txnNodes = newLCL->peekTransactionMap ()->disarmDirty (); + + // write out dirty nodes (temporarily done here) Most come before setAccepted + int fc; + + while ((fc = SHAMap::flushDirty (*acctNodes, 256, hotACCOUNT_NODE, newLCL->getLedgerSeq ())) > 0) { - // we voted NO - try - { - WriteLog (lsDEBUG, LedgerConsensus) << "Test applying disputed transaction that did not get in"; - SerializerIterator sit (it.second->peekTransaction ()); - SerializedTransaction::pointer txn = boost::make_shared (boost::ref (sit)); + WriteLog (lsTRACE, LedgerConsensus) << "Flushed " << fc << " dirty state nodes"; + } - if (applyTransaction (engine, txn, newOL, 
true, false)) - failedTransactions.push_back (txn); - } - catch (...) + while ((fc = SHAMap::flushDirty (*txnNodes, 256, hotTRANSACTION_NODE, newLCL->getLedgerSeq ())) > 0) + { + WriteLog (lsTRACE, LedgerConsensus) << "Flushed " << fc << " dirty transaction nodes"; + } + + newLCL->setAccepted (closeTime, mCloseResolution, closeTimeCorrect); + newLCL->updateHash (); + newLCL->setImmutable (); + + WriteLog (lsDEBUG, LedgerConsensus) << "Report: NewL = " << newLCL->getHash () << ":" << newLCL->getLedgerSeq (); + uint256 newLCLHash = newLCL->getHash (); + + if (ShouldLog (lsTRACE, LedgerConsensus)) + { + WriteLog (lsTRACE, LedgerConsensus) << "newLCL"; + Json::Value p; + newLCL->addJson (p, LEDGER_JSON_DUMP_TXRP | LEDGER_JSON_DUMP_STATE); + WriteLog (lsTRACE, LedgerConsensus) << p; + } + + statusChange (protocol::neACCEPTED_LEDGER, *newLCL); + + if (mValidating && !mConsensusFail) + { + uint256 signingHash; + SerializedValidation::pointer v = boost::make_shared + (newLCLHash, getApp().getOPs ().getValidationTimeNC (), mValPublic, mProposing); + v->setFieldU32 (sfLedgerSequence, newLCL->getLedgerSeq ()); + addLoad(v); + + if (((newLCL->getLedgerSeq () + 1) % 256) == 0) // next ledger is flag ledger { - WriteLog (lsDEBUG, LedgerConsensus) << "Failed to apply transaction we voted NO on"; + getApp().getFeeVote ().doValidation (newLCL, *v); + getApp().getFeatureTable ().doValidation (newLCL, *v); + } + + v->sign (signingHash, mValPrivate); + v->setTrusted (); + getApp().getHashRouter ().addSuppression (signingHash); // suppress it if we receive it + getApp().getValidations ().addValidation (v, "local"); + getApp().getOPs ().setLastValidation (v); + Blob validation = v->getSigned (); + protocol::TMValidation val; + val.set_validation (&validation[0], validation.size ()); + int j = getApp().getPeers ().relayMessage (NULL, + boost::make_shared (val, protocol::mtVALIDATION)); + WriteLog (lsINFO, LedgerConsensus) << "CNF Val " << newLCLHash << " to " << j << " peers"; + } + else 
+ WriteLog (lsINFO, LedgerConsensus) << "CNF newLCL " << newLCLHash; + + Ledger::pointer newOL = boost::make_shared (true, boost::ref (*newLCL)); + ScopedLock sl ( getApp().getLedgerMaster ().getLock ()); + + // Apply disputed transactions that didn't get in + TransactionEngine engine (newOL); + BOOST_FOREACH (u256_lct_pair & it, mDisputes) + { + if (!it.second->getOurVote ()) + { + // we voted NO + try + { + WriteLog (lsDEBUG, LedgerConsensus) << "Test applying disputed transaction that did not get in"; + SerializerIterator sit (it.second->peekTransaction ()); + SerializedTransaction::pointer txn = boost::make_shared (boost::ref (sit)); + + if (applyTransaction (engine, txn, newOL, true, false)) + failedTransactions.push_back (txn); + } + catch (...) + { + WriteLog (lsDEBUG, LedgerConsensus) << "Failed to apply transaction we voted NO on"; + } } } - } - WriteLog (lsDEBUG, LedgerConsensus) << "Applying transactions from current open ledger"; - applyTransactions (getApp().getLedgerMaster ().getCurrentLedger ()->peekTransactionMap (), newOL, newLCL, - failedTransactions, true); - getApp().getLedgerMaster ().pushLedger (newLCL, newOL, !mConsensusFail); - mNewLedgerHash = newLCL->getHash (); - mState = lcsACCEPTED; - sl.unlock (); + WriteLog (lsDEBUG, LedgerConsensus) << "Applying transactions from current open ledger"; + applyTransactions (getApp().getLedgerMaster ().getCurrentLedger ()->peekTransactionMap (), newOL, newLCL, + failedTransactions, true); + getApp().getLedgerMaster ().pushLedger (newLCL, newOL, !mConsensusFail); + mNewLedgerHash = newLCL->getHash (); + mState = lcsACCEPTED; + sl.unlock (); - if (mValidating) - { - // see how close our close time is to other node's close time reports - WriteLog (lsINFO, LedgerConsensus) << "We closed at " << boost::lexical_cast (mCloseTime); - uint64 closeTotal = mCloseTime; - int closeCount = 1; - - for (std::map::iterator it = mCloseTimes.begin (), end = mCloseTimes.end (); it != end; ++it) + if (mValidating) { - // 
FIXME: Use median, not average - WriteLog (lsINFO, LedgerConsensus) << boost::lexical_cast (it->second) << " time votes for " - << boost::lexical_cast (it->first); - closeCount += it->second; - closeTotal += static_cast (it->first) * static_cast (it->second); + // see how close our close time is to other node's close time reports + WriteLog (lsINFO, LedgerConsensus) << "We closed at " << boost::lexical_cast (mCloseTime); + uint64 closeTotal = mCloseTime; + int closeCount = 1; + + for (std::map::iterator it = mCloseTimes.begin (), end = mCloseTimes.end (); it != end; ++it) + { + // FIXME: Use median, not average + WriteLog (lsINFO, LedgerConsensus) << boost::lexical_cast (it->second) << " time votes for " + << boost::lexical_cast (it->first); + closeCount += it->second; + closeTotal += static_cast (it->first) * static_cast (it->second); + } + + closeTotal += (closeCount / 2); + closeTotal /= closeCount; + int offset = static_cast (closeTotal) - static_cast (mCloseTime); + WriteLog (lsINFO, LedgerConsensus) << "Our close offset is estimated at " << offset << " (" << closeCount << ")"; + getApp().getOPs ().closeTimeOffset (offset); } - - closeTotal += (closeCount / 2); - closeTotal /= closeCount; - int offset = static_cast (closeTotal) - static_cast (mCloseTime); - WriteLog (lsINFO, LedgerConsensus) << "Our close offset is estimated at " << offset << " (" << closeCount << ")"; - getApp().getOPs ().closeTimeOffset (offset); } - } void LedgerConsensus::endConsensus () diff --git a/src/cpp/ripple/ripple_LoadManager.cpp b/src/cpp/ripple/ripple_LoadManager.cpp index df4ec66980..4b97611f1c 100644 --- a/src/cpp/ripple/ripple_LoadManager.cpp +++ b/src/cpp/ripple/ripple_LoadManager.cpp @@ -250,6 +250,11 @@ private: static void logDeadlock (int dlTime) { WriteLog (lsWARNING, LoadManager) << "Server stalled for " << dlTime << " seconds."; + + char const* fileName = getApp ().getMasterLock ().getFileName (); + int lineNumber = getApp ().getMasterLock ().getLineNumber (); + + 
WriteLog (lsWARNING, LoadManager) << "Master lock owned by " << File (fileName).getFileName ().toStdString () << ", line " << lineNumber; } private: diff --git a/src/cpp/ripple/ripple_Main.cpp b/src/cpp/ripple/ripple_Main.cpp index d2789a7ef4..598962ebf8 100644 --- a/src/cpp/ripple/ripple_Main.cpp +++ b/src/cpp/ripple/ripple_Main.cpp @@ -141,22 +141,20 @@ public: /** Run the Beast unit tests. */ -static void runBeastUnitTests () +static void runBeastUnitTests (std::string const& individualTest = "") { RippleUnitTests tr; tr.setAssertOnFailure (false); tr.setPassesAreLogged (false); - tr.runAllTests (); - - // Report - for (int i = 0; i < tr.getNumResults (); ++i) + if (individualTest.empty ()) { - UnitTests::TestResult const& r (*tr.getResult (i)); - - for (int j = 0; j < r.messages.size (); ++i) - Log::out () << r.messages [j].toStdString (); + tr.runAllTests (); + } + else + { + tr.runTest (individualTest.c_str ()); } } @@ -218,6 +216,15 @@ int rippleMain (int argc, char** argv) int iResult = 0; po::variables_map vm; // Map of options. + String importDescription; + { + importDescription << + "Import an existing node database (specified in the " << + "[" << ConfigSection::importNodeDatabase () << "] configuration file section) " + "into the current node database (specified in the " << + "[" << ConfigSection::nodeDatabase () << "] configuration file section). "; + } + // VFALCO TODO Replace boost program options with something from Beast. // // Set up option parsing. 
@@ -232,7 +239,7 @@ int rippleMain (int argc, char** argv) ("standalone,a", "Run with no peers.") ("testnet,t", "Run in test net mode.") ("unittest,u", "Perform unit tests.") - ("unittest2", "Perform new unit tests.") + ("unittest2", po::value ()->implicit_value (""), "Perform new unit tests.") ("parameters", po::value< vector > (), "Specify comma separated parameters.") ("quiet,q", "Reduce diagnotics.") ("verbose,v", "Verbose logging.") @@ -242,7 +249,7 @@ int rippleMain (int argc, char** argv) ("start", "Start from a fresh Ledger.") ("net", "Get the initial ledger from the network.") ("fg", "Run in the foreground.") - ("import", po::value (), "Import old DB into new DB.") + ("import", importDescription.toStdString ().c_str ()) ; // Interpret positional arguments as --parameters. @@ -250,8 +257,10 @@ int rippleMain (int argc, char** argv) p.add ("parameters", -1); // These must be added before the Application object is created - NodeStore::addBackendFactory (SqliteBackendFactory::getInstance ()); + NodeStore::addBackendFactory (KeyvaDBBackendFactory::getInstance ()); NodeStore::addBackendFactory (LevelDBBackendFactory::getInstance ()); + NodeStore::addBackendFactory (NullBackendFactory::getInstance ()); + NodeStore::addBackendFactory (SqliteBackendFactory::getInstance ()); #if RIPPLE_HYPERLEVELDB_AVAILABLE NodeStore::addBackendFactory (HyperLevelDBBackendFactory::getInstance ()); #endif @@ -299,7 +308,7 @@ int rippleMain (int argc, char** argv) if (HaveSustain () && !iResult && !vm.count ("parameters") && !vm.count ("fg") && !vm.count ("standalone") && !vm.count ("unittest")) { - std::string logMe = DoSustain (theConfig.DEBUG_LOGFILE.c_str()); + std::string logMe = DoSustain (theConfig.DEBUG_LOGFILE.string()); if (!logMe.empty ()) Log (lsWARNING) << logMe; @@ -331,7 +340,10 @@ int rippleMain (int argc, char** argv) if (vm.count ("unittest2")) { - runBeastUnitTests (); + std::string const test = vm ["unittest2"].as (); + + runBeastUnitTests (test); + return 0; } @@ 
-351,8 +363,14 @@ int rippleMain (int argc, char** argv) if (vm.count ("start")) theConfig.START_UP = Config::FRESH; + // Handle a one-time import option + // if (vm.count ("import")) - theConfig.DB_IMPORT = vm["import"].as (); + { + String const optionString (vm ["import"].as ()); + + theConfig.importNodeDatabase = parseDelimitedKeyValueString (optionString); + } if (vm.count ("ledger")) { diff --git a/src/cpp/ripple/ripple_Pathfinder.cpp b/src/cpp/ripple/ripple_Pathfinder.cpp index a2fa47d717..dc798110ec 100644 --- a/src/cpp/ripple/ripple_Pathfinder.cpp +++ b/src/cpp/ripple/ripple_Pathfinder.cpp @@ -818,7 +818,7 @@ int Pathfinder::getPathsOut (const uint160& currencyID, const uint160& accountID return it->second; int aFlags = mLedger->getSLEi(Ledger::getAccountRootIndex(accountID))->getFieldU32(sfFlags); - bool bAuthRequired = aFlags & lsfRequireAuth; + bool const bAuthRequired = (aFlags & lsfRequireAuth) != 0; int count = 0; AccountItems& rippleLines (mRLCache->getRippleLines (accountID)); diff --git a/src/cpp/ripple/ripple_Peer.cpp b/src/cpp/ripple/ripple_Peer.cpp index 227ab61324..afea97c38e 100644 --- a/src/cpp/ripple/ripple_Peer.cpp +++ b/src/cpp/ripple/ripple_Peer.cpp @@ -165,12 +165,12 @@ private: void recvHello (protocol::TMHello & packet); void recvCluster (protocol::TMCluster & packet); - void recvTransaction (protocol::TMTransaction & packet, ScopedLock & MasterLockHolder); - void recvValidation (const boost::shared_ptr& packet, ScopedLock & MasterLockHolder); + void recvTransaction (protocol::TMTransaction & packet, Application::ScopedLockType& masterLockHolder); + void recvValidation (const boost::shared_ptr& packet, Application::ScopedLockType& masterLockHolder); void recvGetValidation (protocol::TMGetValidations & packet); void recvContact (protocol::TMContact & packet); void recvGetContacts (protocol::TMGetContacts & packet); - void recvGetPeers (protocol::TMGetPeers & packet, ScopedLock & MasterLockHolder); + void recvGetPeers 
(protocol::TMGetPeers & packet, Application::ScopedLockType& masterLockHolder); void recvPeers (protocol::TMPeers & packet); void recvGetObjectByHash (const boost::shared_ptr& packet); void recvPing (protocol::TMPing & packet); @@ -178,8 +178,8 @@ private: void recvSearchTransaction (protocol::TMSearchTransaction & packet); void recvGetAccount (protocol::TMGetAccount & packet); void recvAccount (protocol::TMAccount & packet); - void recvGetLedger (protocol::TMGetLedger & packet, ScopedLock & MasterLockHolder); - void recvLedger (const boost::shared_ptr& packet, ScopedLock & MasterLockHolder); + void recvGetLedger (protocol::TMGetLedger & packet, Application::ScopedLockType& masterLockHolder); + void recvLedger (const boost::shared_ptr& packet, Application::ScopedLockType& masterLockHolder); void recvStatus (protocol::TMStatusChange & packet); void recvPropose (const boost::shared_ptr& packet); void recvHaveTxSet (protocol::TMHaveTransactionSet & packet); @@ -630,8 +630,11 @@ void PeerImp::handleReadBody (const boost::system::error_code& error) WriteLog (lsINFO, Peer) << "Peer: Body: Error: " << getIP () << ": " << error.category ().name () << ": " << error.message () << ": " << error; } - boost::recursive_mutex::scoped_lock sl (getApp().getMasterLock ()); - detach ("hrb", true); + { + Application::ScopedLockType lock (getApp ().getMasterLock (), __FILE__, __LINE__); + + detach ("hrb", true); + } return; } @@ -651,276 +654,278 @@ void PeerImp::processReadBuffer () LoadEvent::autoptr event (getApp().getJobQueue ().getLoadEventAP (jtPEER, "PeerImp::read")); - ScopedLock sl (getApp().getMasterLock ()); - - // If connected and get a mtHELLO or if not connected and get a non-mtHELLO, wrong message was sent. 
- if (mHelloed == (type == protocol::mtHELLO)) { - WriteLog (lsWARNING, Peer) << "Wrong message type: " << type; - detach ("prb1", true); - } - else - { - switch (type) - { - case protocol::mtHELLO: - { - event->reName ("PeerImp::hello"); - protocol::TMHello msg; + Application::ScopedLockType lock (getApp ().getMasterLock (), __FILE__, __LINE__); - if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) - recvHello (msg); - else - WriteLog (lsWARNING, Peer) << "parse error: " << type; + // If connected and get a mtHELLO or if not connected and get a non-mtHELLO, wrong message was sent. + if (mHelloed == (type == protocol::mtHELLO)) + { + WriteLog (lsWARNING, Peer) << "Wrong message type: " << type; + detach ("prb1", true); } - break; - - case protocol::mtCLUSTER: + else { - event->reName ("PeerImp::cluster"); - protocol::TMCluster msg; + switch (type) + { + case protocol::mtHELLO: + { + event->reName ("PeerImp::hello"); + protocol::TMHello msg; - if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) - recvCluster (msg); - else - WriteLog (lsWARNING, Peer) << "parse error: " << type; - } + if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) + recvHello (msg); + else + WriteLog (lsWARNING, Peer) << "parse error: " << type; + } + break; - case protocol::mtERROR_MSG: - { - event->reName ("PeerImp::errormessage"); - protocol::TMErrorMsg msg; + case protocol::mtCLUSTER: + { + event->reName ("PeerImp::cluster"); + protocol::TMCluster msg; - if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) - recvErrorMessage (msg); - else - WriteLog (lsWARNING, Peer) << "parse error: " << type; - } - break; + if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) + recvCluster (msg); + else + 
WriteLog (lsWARNING, Peer) << "parse error: " << type; + } - case protocol::mtPING: - { - event->reName ("PeerImp::ping"); - protocol::TMPing msg; + case protocol::mtERROR_MSG: + { + event->reName ("PeerImp::errormessage"); + protocol::TMErrorMsg msg; - if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) - recvPing (msg); - else - WriteLog (lsWARNING, Peer) << "parse error: " << type; - } - break; + if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) + recvErrorMessage (msg); + else + WriteLog (lsWARNING, Peer) << "parse error: " << type; + } + break; - case protocol::mtGET_CONTACTS: - { - event->reName ("PeerImp::getcontacts"); - protocol::TMGetContacts msg; + case protocol::mtPING: + { + event->reName ("PeerImp::ping"); + protocol::TMPing msg; - if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) - recvGetContacts (msg); - else - WriteLog (lsWARNING, Peer) << "parse error: " << type; - } - break; + if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) + recvPing (msg); + else + WriteLog (lsWARNING, Peer) << "parse error: " << type; + } + break; - case protocol::mtCONTACT: - { - event->reName ("PeerImp::contact"); - protocol::TMContact msg; + case protocol::mtGET_CONTACTS: + { + event->reName ("PeerImp::getcontacts"); + protocol::TMGetContacts msg; - if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) - recvContact (msg); - else - WriteLog (lsWARNING, Peer) << "parse error: " << type; - } - break; + if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) + recvGetContacts (msg); + else + WriteLog (lsWARNING, Peer) << "parse error: " << type; + } + break; - case protocol::mtGET_PEERS: - { - event->reName 
("PeerImp::getpeers"); - protocol::TMGetPeers msg; + case protocol::mtCONTACT: + { + event->reName ("PeerImp::contact"); + protocol::TMContact msg; - if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) - recvGetPeers (msg, sl); - else - WriteLog (lsWARNING, Peer) << "parse error: " << type; - } - break; + if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) + recvContact (msg); + else + WriteLog (lsWARNING, Peer) << "parse error: " << type; + } + break; - case protocol::mtPEERS: - { - event->reName ("PeerImp::peers"); - protocol::TMPeers msg; + case protocol::mtGET_PEERS: + { + event->reName ("PeerImp::getpeers"); + protocol::TMGetPeers msg; - if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) - recvPeers (msg); - else - WriteLog (lsWARNING, Peer) << "parse error: " << type; - } - break; + if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) + recvGetPeers (msg, lock); + else + WriteLog (lsWARNING, Peer) << "parse error: " << type; + } + break; - case protocol::mtSEARCH_TRANSACTION: - { - event->reName ("PeerImp::searchtransaction"); - protocol::TMSearchTransaction msg; + case protocol::mtPEERS: + { + event->reName ("PeerImp::peers"); + protocol::TMPeers msg; - if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) - recvSearchTransaction (msg); - else - WriteLog (lsWARNING, Peer) << "parse error: " << type; - } - break; + if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) + recvPeers (msg); + else + WriteLog (lsWARNING, Peer) << "parse error: " << type; + } + break; - case protocol::mtGET_ACCOUNT: - { - event->reName ("PeerImp::getaccount"); - protocol::TMGetAccount msg; + case 
protocol::mtSEARCH_TRANSACTION: + { + event->reName ("PeerImp::searchtransaction"); + protocol::TMSearchTransaction msg; - if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) - recvGetAccount (msg); - else - WriteLog (lsWARNING, Peer) << "parse error: " << type; - } - break; + if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) + recvSearchTransaction (msg); + else + WriteLog (lsWARNING, Peer) << "parse error: " << type; + } + break; - case protocol::mtACCOUNT: - { - event->reName ("PeerImp::account"); - protocol::TMAccount msg; + case protocol::mtGET_ACCOUNT: + { + event->reName ("PeerImp::getaccount"); + protocol::TMGetAccount msg; - if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) - recvAccount (msg); - else - WriteLog (lsWARNING, Peer) << "parse error: " << type; - } - break; + if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) + recvGetAccount (msg); + else + WriteLog (lsWARNING, Peer) << "parse error: " << type; + } + break; - case protocol::mtTRANSACTION: - { - event->reName ("PeerImp::transaction"); - protocol::TMTransaction msg; + case protocol::mtACCOUNT: + { + event->reName ("PeerImp::account"); + protocol::TMAccount msg; - if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) - recvTransaction (msg, sl); - else - WriteLog (lsWARNING, Peer) << "parse error: " << type; - } - break; + if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) + recvAccount (msg); + else + WriteLog (lsWARNING, Peer) << "parse error: " << type; + } + break; - case protocol::mtSTATUS_CHANGE: - { - event->reName ("PeerImp::statuschange"); - protocol::TMStatusChange msg; + case protocol::mtTRANSACTION: + { + event->reName 
("PeerImp::transaction"); + protocol::TMTransaction msg; - if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) - recvStatus (msg); - else - WriteLog (lsWARNING, Peer) << "parse error: " << type; - } - break; + if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) + recvTransaction (msg, lock); + else + WriteLog (lsWARNING, Peer) << "parse error: " << type; + } + break; - case protocol::mtPROPOSE_LEDGER: - { - event->reName ("PeerImp::propose"); - boost::shared_ptr msg = boost::make_shared (); + case protocol::mtSTATUS_CHANGE: + { + event->reName ("PeerImp::statuschange"); + protocol::TMStatusChange msg; - if (msg->ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) - recvPropose (msg); - else - WriteLog (lsWARNING, Peer) << "parse error: " << type; - } - break; + if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) + recvStatus (msg); + else + WriteLog (lsWARNING, Peer) << "parse error: " << type; + } + break; - case protocol::mtGET_LEDGER: - { - event->reName ("PeerImp::getledger"); - protocol::TMGetLedger msg; + case protocol::mtPROPOSE_LEDGER: + { + event->reName ("PeerImp::propose"); + boost::shared_ptr msg = boost::make_shared (); - if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) - recvGetLedger (msg, sl); - else - WriteLog (lsWARNING, Peer) << "parse error: " << type; - } - break; + if (msg->ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) + recvPropose (msg); + else + WriteLog (lsWARNING, Peer) << "parse error: " << type; + } + break; - case protocol::mtLEDGER_DATA: - { - event->reName ("PeerImp::ledgerdata"); - boost::shared_ptr msg = boost::make_shared (); + case protocol::mtGET_LEDGER: + { + event->reName 
("PeerImp::getledger"); + protocol::TMGetLedger msg; - if (msg->ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) - recvLedger (msg, sl); - else - WriteLog (lsWARNING, Peer) << "parse error: " << type; - } - break; + if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) + recvGetLedger (msg, lock); + else + WriteLog (lsWARNING, Peer) << "parse error: " << type; + } + break; - case protocol::mtHAVE_SET: - { - event->reName ("PeerImp::haveset"); - protocol::TMHaveTransactionSet msg; + case protocol::mtLEDGER_DATA: + { + event->reName ("PeerImp::ledgerdata"); + boost::shared_ptr msg = boost::make_shared (); - if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) - recvHaveTxSet (msg); - else - WriteLog (lsWARNING, Peer) << "parse error: " << type; - } - break; + if (msg->ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) + recvLedger (msg, lock); + else + WriteLog (lsWARNING, Peer) << "parse error: " << type; + } + break; - case protocol::mtVALIDATION: - { - event->reName ("PeerImp::validation"); - boost::shared_ptr msg = boost::make_shared (); + case protocol::mtHAVE_SET: + { + event->reName ("PeerImp::haveset"); + protocol::TMHaveTransactionSet msg; - if (msg->ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) - recvValidation (msg, sl); - else - WriteLog (lsWARNING, Peer) << "parse error: " << type; - } - break; -#if 0 + if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) + recvHaveTxSet (msg); + else + WriteLog (lsWARNING, Peer) << "parse error: " << type; + } + break; - case protocol::mtGET_VALIDATION: - { - protocol::TM msg; + case protocol::mtVALIDATION: + { + event->reName ("PeerImp::validation"); + boost::shared_ptr msg = 
boost::make_shared (); - if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) - recv (msg); - else - WriteLog (lsWARNING, Peer) << "parse error: " << type; - } - break; + if (msg->ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) + recvValidation (msg, lock); + else + WriteLog (lsWARNING, Peer) << "parse error: " << type; + } + break; + #if 0 -#endif + case protocol::mtGET_VALIDATION: + { + protocol::TM msg; - case protocol::mtGET_OBJECTS: - { - event->reName ("PeerImp::getobjects"); - boost::shared_ptr msg = boost::make_shared (); + if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) + recv (msg); + else + WriteLog (lsWARNING, Peer) << "parse error: " << type; + } + break; - if (msg->ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) - recvGetObjectByHash (msg); - else - WriteLog (lsWARNING, Peer) << "parse error: " << type; - } - break; + #endif - case protocol::mtPROOFOFWORK: - { - event->reName ("PeerImp::proofofwork"); - protocol::TMProofWork msg; + case protocol::mtGET_OBJECTS: + { + event->reName ("PeerImp::getobjects"); + boost::shared_ptr msg = boost::make_shared (); - if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) - recvProofWork (msg); - else - WriteLog (lsWARNING, Peer) << "parse error: " << type; - } - break; + if (msg->ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) + recvGetObjectByHash (msg); + else + WriteLog (lsWARNING, Peer) << "parse error: " << type; + } + break; + + case protocol::mtPROOFOFWORK: + { + event->reName ("PeerImp::proofofwork"); + protocol::TMProofWork msg; + + if (msg.ParseFromArray (&mReadbuf[PackedMessage::kHeaderBytes], mReadbuf.size () - PackedMessage::kHeaderBytes)) + 
recvProofWork (msg); + else + WriteLog (lsWARNING, Peer) << "parse error: " << type; + } + break; - default: - event->reName ("PeerImp::unknown"); - WriteLog (lsWARNING, Peer) << "Unknown Msg: " << type; - WriteLog (lsWARNING, Peer) << strHex (&mReadbuf[0], mReadbuf.size ()); + default: + event->reName ("PeerImp::unknown"); + WriteLog (lsWARNING, Peer) << "Unknown Msg: " << type; + WriteLog (lsWARNING, Peer) << strHex (&mReadbuf[0], mReadbuf.size ()); + } } } } @@ -1111,9 +1116,9 @@ static void checkTransaction (Job&, int flags, SerializedTransaction::pointer st #endif } -void PeerImp::recvTransaction (protocol::TMTransaction& packet, ScopedLock& MasterLockHolder) +void PeerImp::recvTransaction (protocol::TMTransaction& packet, Application::ScopedLockType& masterLockHolder) { - MasterLockHolder.unlock (); + masterLockHolder.unlock (); Transaction::pointer tx; #ifndef TRUST_NETWORK @@ -1376,9 +1381,9 @@ static void checkValidation (Job&, SerializedValidation::pointer val, uint256 si #endif } -void PeerImp::recvValidation (const boost::shared_ptr& packet, ScopedLock& MasterLockHolder) +void PeerImp::recvValidation (const boost::shared_ptr& packet, Application::ScopedLockType& masterLockHolder) { - MasterLockHolder.unlock (); + masterLockHolder.unlock (); if (packet->validation ().size () < 50) { @@ -1464,9 +1469,9 @@ void PeerImp::recvGetContacts (protocol::TMGetContacts& packet) // Return a list of your favorite people // TODO: filter out all the LAN peers // TODO: filter out the peer you are talking to -void PeerImp::recvGetPeers (protocol::TMGetPeers& packet, ScopedLock& MasterLockHolder) +void PeerImp::recvGetPeers (protocol::TMGetPeers& packet, Application::ScopedLockType& masterLockHolder) { - MasterLockHolder.unlock (); + masterLockHolder.unlock (); std::vector addrs; getApp().getPeers ().getTopNAddrs (30, addrs); @@ -1550,7 +1555,7 @@ void PeerImp::recvGetObjectByHash (const boost::shared_ptrisImmutable ()) - MasterLockHolder.unlock (); + 
masterLockHolder.unlock (); else { WriteLog (lsWARNING, Peer) << "Request for data from mutable ledger"; @@ -2064,9 +2069,9 @@ void PeerImp::recvGetLedger (protocol::TMGetLedger& packet, ScopedLock& MasterLo sendPacket (oPacket, true); } -void PeerImp::recvLedger (const boost::shared_ptr& packet_ptr, ScopedLock& MasterLockHolder) +void PeerImp::recvLedger (const boost::shared_ptr& packet_ptr, Application::ScopedLockType& masterLockHolder) { - MasterLockHolder.unlock (); + masterLockHolder.unlock (); protocol::TMLedgerData& packet = *packet_ptr; if (packet.nodes ().size () <= 0) diff --git a/src/cpp/ripple/ripple_ProofOfWork.cpp b/src/cpp/ripple/ripple_ProofOfWork.cpp index 03f407f7e7..3a6f7a0839 100644 --- a/src/cpp/ripple/ripple_ProofOfWork.cpp +++ b/src/cpp/ripple/ripple_ProofOfWork.cpp @@ -177,4 +177,5 @@ bool ProofOfWork::validateToken (const std::string& strToken) return boost::regex_match (strToken, smMatch, reToken); } -// vim:ts=4 +//------------------------------------------------------------------------------ + diff --git a/src/cpp/ripple/ripple_ProofOfWorkFactory.cpp b/src/cpp/ripple/ripple_ProofOfWorkFactory.cpp index 0cd7c776dd..1df2d79d37 100644 --- a/src/cpp/ripple/ripple_ProofOfWorkFactory.cpp +++ b/src/cpp/ripple/ripple_ProofOfWorkFactory.cpp @@ -231,3 +231,72 @@ IProofOfWorkFactory* IProofOfWorkFactory::New () return new ProofOfWorkFactory; } +//------------------------------------------------------------------------------ + +class ProofOfWorkTests : public UnitTest +{ +public: + ProofOfWorkTests () : UnitTest ("ProofOfWork", "ripple", UnitTest::runManual) + { + } + + void runTest () + { + using namespace ripple; + + ProofOfWorkFactory gen; + ProofOfWork pow = gen.getProof (); + + String s; + + s << "solve difficulty " << String (pow.getDifficulty ()); + beginTest ("solve"); + + uint256 solution = pow.solve (16777216); + + expect (! 
solution.isZero (), "Should be solved"); + + expect (pow.checkSolution (solution), "Should be checked"); + + // Why is this emitted? + //WriteLog (lsDEBUG, ProofOfWork) << "A bad nonce error is expected"; + + POWResult r = gen.checkProof (pow.getToken (), uint256 ()); + + expect (r == powBADNONCE, "Should show bad nonce for empty solution"); + + expect (gen.checkProof (pow.getToken (), solution) == powOK, "Solution should check with issuer"); + + //WriteLog (lsDEBUG, ProofOfWork) << "A reused nonce error is expected"; + + expect (gen.checkProof (pow.getToken (), solution) == powREUSED, "Reuse solution should be detected"); + + #ifdef SOLVE_POWS + + for (int i = 0; i < 12; ++i) + { + gen.setDifficulty (i); + ProofOfWork pow = gen.getProof (); + WriteLog (lsINFO, ProofOfWork) << "Level: " << i << ", Estimated difficulty: " << pow.getDifficulty (); + uint256 solution = pow.solve (131072); + + if (solution.isZero ()) + { + //WriteLog (lsINFO, ProofOfWork) << "Giving up"; + } + else + { + //WriteLog (lsINFO, ProofOfWork) << "Solution found"; + + if (gen.checkProof (pow.getToken (), solution) != powOK) + { + //WriteLog (lsFATAL, ProofOfWork) << "Solution fails"; + } + } + } + + #endif + } +}; + +static ProofOfWorkTests proofOfWorkTests; diff --git a/src/cpp/ripple/ripple_ProofOfWorkFactoryUnitTests.cpp b/src/cpp/ripple/ripple_ProofOfWorkFactoryUnitTests.cpp index e96eccd485..121e1743be 100644 --- a/src/cpp/ripple/ripple_ProofOfWorkFactoryUnitTests.cpp +++ b/src/cpp/ripple/ripple_ProofOfWorkFactoryUnitTests.cpp @@ -4,64 +4,3 @@ */ //============================================================================== -BOOST_AUTO_TEST_SUITE (ProofOfWork_suite) - -BOOST_AUTO_TEST_CASE ( ProofOfWork_test ) -{ - using namespace ripple; - - ProofOfWorkFactory gen; - ProofOfWork pow = gen.getProof (); - WriteLog (lsINFO, ProofOfWork) << "Estimated difficulty: " << pow.getDifficulty (); - uint256 solution = pow.solve (16777216); - - if (solution.isZero ()) - BOOST_FAIL ("Unable to 
solve proof of work"); - - if (!pow.checkSolution (solution)) - BOOST_FAIL ("Solution did not check"); - - WriteLog (lsDEBUG, ProofOfWork) << "A bad nonce error is expected"; - POWResult r = gen.checkProof (pow.getToken (), uint256 ()); - - if (r != powBADNONCE) - { - Log (lsFATAL) << "POWResult = " << static_cast (r); - BOOST_FAIL ("Empty solution didn't show bad nonce"); - } - - if (gen.checkProof (pow.getToken (), solution) != powOK) - BOOST_FAIL ("Solution did not check with issuer"); - - WriteLog (lsDEBUG, ProofOfWork) << "A reused nonce error is expected"; - - if (gen.checkProof (pow.getToken (), solution) != powREUSED) - BOOST_FAIL ("Reuse solution not detected"); - -#ifdef SOLVE_POWS - - for (int i = 0; i < 12; ++i) - { - gen.setDifficulty (i); - ProofOfWork pow = gen.getProof (); - WriteLog (lsINFO, ProofOfWork) << "Level: " << i << ", Estimated difficulty: " << pow.getDifficulty (); - uint256 solution = pow.solve (131072); - - if (solution.isZero ()) - WriteLog (lsINFO, ProofOfWork) << "Giving up"; - else - { - WriteLog (lsINFO, ProofOfWork) << "Solution found"; - - if (gen.checkProof (pow.getToken (), solution) != powOK) - { - WriteLog (lsFATAL, ProofOfWork) << "Solution fails"; - } - } - } - -#endif - -} - -BOOST_AUTO_TEST_SUITE_END () diff --git a/src/cpp/ripple/ripple_SHAMap.cpp b/src/cpp/ripple/ripple_SHAMap.cpp index 3782e7a4a9..e1cc0325bd 100644 --- a/src/cpp/ripple/ripple_SHAMap.cpp +++ b/src/cpp/ripple/ripple_SHAMap.cpp @@ -205,7 +205,7 @@ SHAMapTreeNode::pointer SHAMap::getNode (const SHAMapNode& id, uint256 const& ha if (node) { -#ifdef BEAST_DEBUG +#if BEAST_DEBUG if (node->getNodeHash () != hash) { @@ -213,7 +213,7 @@ SHAMapTreeNode::pointer SHAMap::getNode (const SHAMapNode& id, uint256 const& ha WriteLog (lsFATAL, SHAMap) << "ID: " << id; WriteLog (lsFATAL, SHAMap) << "TgtHash " << hash; WriteLog (lsFATAL, SHAMap) << "NodHash " << node->getNodeHash (); - throw std::runtime_error ("invalid node"); + Throw (std::runtime_error ("invalid 
node")); } #endif @@ -230,7 +230,7 @@ SHAMapTreeNode* SHAMap::getNodePointer (const SHAMapNode& id, uint256 const& has SHAMapTreeNode* ret = getNodePointerNT (id, hash); if (!ret) - throw SHAMapMissingNode (mType, id, hash); + Throw (SHAMapMissingNode (mType, id, hash)); return ret; } @@ -251,7 +251,7 @@ SHAMapTreeNode* SHAMap::getNodePointer (const SHAMapNode& id, uint256 const& has SHAMapTreeNode* ret = getNodePointerNT (id, hash, filter); if (!ret) - throw SHAMapMissingNode (mType, id, hash); + Throw (SHAMapMissingNode (mType, id, hash)); return ret; } @@ -493,7 +493,7 @@ SHAMapItem::pointer SHAMap::peekNextItem (uint256 const& id, SHAMapTreeNode::TNT firstNode = firstBelow (firstNode); if (!firstNode || firstNode->isInner ()) - throw std::runtime_error ("missing/corrupt node"); + Throw (std::runtime_error ("missing/corrupt node")); type = firstNode->getType (); return firstNode->peekItem (); @@ -531,7 +531,7 @@ SHAMapItem::pointer SHAMap::peekPrevItem (uint256 const& id) SHAMapTreeNode* item = firstBelow (node.get ()); if (!item) - throw std::runtime_error ("missing node"); + Throw (std::runtime_error ("missing node")); return item->peekItem (); } @@ -597,7 +597,7 @@ bool SHAMap::delItem (uint256 const& id) std::stack stack = getStack (id, true); if (stack.empty ()) - throw std::runtime_error ("missing node"); + Throw (std::runtime_error ("missing node")); SHAMapTreeNode::pointer leaf = stack.top (); stack.pop (); @@ -678,7 +678,7 @@ bool SHAMap::addGiveItem (SHAMapItem::ref item, bool isTransaction, bool hasMeta std::stack stack = getStack (tag, true); if (stack.empty ()) - throw std::runtime_error ("missing node"); + Throw (std::runtime_error ("missing node")); SHAMapTreeNode::pointer node = stack.top (); stack.pop (); @@ -703,7 +703,7 @@ bool SHAMap::addGiveItem (SHAMapItem::ref item, bool isTransaction, bool hasMeta WriteLog (lsFATAL, SHAMap) << "NewNode: " << *newNode; dump (); assert (false); - throw std::runtime_error ("invalid inner node"); + Throw 
(std::runtime_error ("invalid inner node")); } trackNewNode (newNode); @@ -776,7 +776,7 @@ bool SHAMap::updateGiveItem (SHAMapItem::ref item, bool isTransaction, bool hasM std::stack stack = getStack (tag, true); if (stack.empty ()) - throw std::runtime_error ("missing node"); + Throw (std::runtime_error ("missing node")); SHAMapTreeNode::pointer node = stack.top (); stack.pop (); @@ -810,7 +810,7 @@ SHAMapTreeNode::pointer SHAMap::fetchNodeExternal (const SHAMapNode& id, uint256 SHAMapTreeNode::pointer ret = fetchNodeExternalNT (id, hash); if (!ret) - throw SHAMapMissingNode (mType, id, hash); + Throw (SHAMapMissingNode (mType, id, hash)); return ret; } @@ -822,7 +822,10 @@ SHAMapTreeNode::pointer SHAMap::fetchNodeExternalNT (const SHAMapNode& id, uint2 if (!getApp().running ()) return ret; - NodeObject::pointer obj (getApp().getNodeStore ().retrieve (hash)); + // These are for diagnosing a crash on exit + Application& app (getApp ()); + NodeStore& nodeStore (app.getNodeStore ()); + NodeObject::pointer obj (nodeStore.fetch (hash)); if (!obj) { @@ -885,8 +888,11 @@ bool SHAMap::fetchRoot (uint256 const& hash, SHAMapSyncFilter* filter) } SHAMapTreeNode::pointer newRoot = fetchNodeExternalNT(SHAMapNode(), hash); + if (newRoot) + { root = newRoot; + } else { Blob nodeData; @@ -935,7 +941,7 @@ int SHAMap::flushDirty (DirtyMap& map, int maxNodes, NodeObjectType t, uint32 se #endif - getApp().getNodeStore ().store (t, seq, s.peekData (), it->second->getNodeHash ()); + getApp().getNodeStore ().store (t, seq, s.modData (), it->second->getNodeHash ()); if (flushed++ >= maxNodes) return flushed; diff --git a/src/cpp/ripple/ripple_SHAMapNode.cpp b/src/cpp/ripple/ripple_SHAMapNode.cpp index dcd8c0b3dc..86062eb56e 100644 --- a/src/cpp/ripple/ripple_SHAMapNode.cpp +++ b/src/cpp/ripple/ripple_SHAMapNode.cpp @@ -128,7 +128,7 @@ SHAMapNode SHAMapNode::getChildNodeID (int m) const // Which branch would contain the specified hash int SHAMapNode::selectBranch (uint256 const& hash) 
const { -#ifdef PARANOID +#if RIPPLE_VERIFY_NODEOBJECT_KEYS if (mDepth >= 64) { diff --git a/src/cpp/ripple/ripple_SHAMapSync.cpp b/src/cpp/ripple/ripple_SHAMapSync.cpp index 941f3b4ee8..d9579600fe 100644 --- a/src/cpp/ripple/ripple_SHAMapSync.cpp +++ b/src/cpp/ripple/ripple_SHAMapSync.cpp @@ -243,7 +243,7 @@ SHAMapAddNode SHAMap::addRootNode (Blob const& rootNode, SHANodeFormat format, { Serializer s; root->addRaw (s, snfPREFIX); - filter->gotNode (false, *root, root->getNodeHash (), s.peekData (), root->getType ()); + filter->gotNode (false, *root, root->getNodeHash (), s.modData (), root->getType ()); } return SHAMapAddNode::useful (); @@ -281,7 +281,7 @@ SHAMapAddNode SHAMap::addRootNode (uint256 const& hash, Blob const& rootNode, SH { Serializer s; root->addRaw (s, snfPREFIX); - filter->gotNode (false, *root, root->getNodeHash (), s.peekData (), root->getType ()); + filter->gotNode (false, *root, root->getNodeHash (), s.modData (), root->getType ()); } return SHAMapAddNode::useful (); @@ -345,7 +345,7 @@ SHAMapAddNode SHAMap::addKnownNode (const SHAMapNode& node, Blob const& rawNode, { Serializer s; newNode->addRaw (s, snfPREFIX); - filter->gotNode (false, node, iNode->getChildHash (branch), s.peekData (), newNode->getType ()); + filter->gotNode (false, node, iNode->getChildHash (branch), s.modData (), newNode->getType ()); } mTNByID[node] = newNode; diff --git a/src/cpp/ripple/ripple_SHAMapSyncFilter.h b/src/cpp/ripple/ripple_SHAMapSyncFilter.h index 1b2ec89b4b..f4c83d6181 100644 --- a/src/cpp/ripple/ripple_SHAMapSyncFilter.h +++ b/src/cpp/ripple/ripple_SHAMapSyncFilter.h @@ -12,29 +12,18 @@ class SHAMapSyncFilter { public: - SHAMapSyncFilter () - { - } - - virtual ~SHAMapSyncFilter () - { - } + virtual ~SHAMapSyncFilter () { } + // Note that the nodeData is overwritten by this call virtual void gotNode (bool fromFilter, SHAMapNode const& id, uint256 const& nodeHash, - Blob const& nodeData, - SHAMapTreeNode::TNType type) - { - } + Blob& nodeData, + 
SHAMapTreeNode::TNType type) = 0; virtual bool haveNode (SHAMapNode const& id, uint256 const& nodeHash, - Blob& nodeData) - { - return false; - } + Blob& nodeData) = 0; }; #endif -// vim:ts=4 diff --git a/src/cpp/ripple/ripple_SHAMapSyncFilters.cpp b/src/cpp/ripple/ripple_SHAMapSyncFilters.cpp index db451f56c2..157a903f2c 100644 --- a/src/cpp/ripple/ripple_SHAMapSyncFilters.cpp +++ b/src/cpp/ripple/ripple_SHAMapSyncFilters.cpp @@ -9,7 +9,7 @@ ConsensusTransSetSF::ConsensusTransSetSF () } void ConsensusTransSetSF::gotNode (bool fromFilter, const SHAMapNode& id, uint256 const& nodeHash, - Blob const& nodeData, SHAMapTreeNode::TNType type) + Blob& nodeData, SHAMapTreeNode::TNType type) { if (fromFilter) return; @@ -70,7 +70,7 @@ AccountStateSF::AccountStateSF (uint32 ledgerSeq) void AccountStateSF::gotNode (bool fromFilter, SHAMapNode const& id, uint256 const& nodeHash, - Blob const& nodeData, + Blob& nodeData, SHAMapTreeNode::TNType) { getApp().getNodeStore ().store (hotACCOUNT_NODE, mLedgerSeq, nodeData, nodeHash); @@ -93,7 +93,7 @@ TransactionStateSF::TransactionStateSF (uint32 ledgerSeq) void TransactionStateSF::gotNode (bool fromFilter, SHAMapNode const& id, uint256 const& nodeHash, - Blob const& nodeData, + Blob& nodeData, SHAMapTreeNode::TNType type) { getApp().getNodeStore ().store ( diff --git a/src/cpp/ripple/ripple_SHAMapSyncFilters.h b/src/cpp/ripple/ripple_SHAMapSyncFilters.h index d41593bb72..0bf834b8b9 100644 --- a/src/cpp/ripple/ripple_SHAMapSyncFilters.h +++ b/src/cpp/ripple/ripple_SHAMapSyncFilters.h @@ -17,10 +17,11 @@ class ConsensusTransSetSF : public SHAMapSyncFilter public: ConsensusTransSetSF (); + // Note that the nodeData is overwritten by this call void gotNode (bool fromFilter, SHAMapNode const& id, uint256 const& nodeHash, - Blob const& nodeData, + Blob& nodeData, SHAMapTreeNode::TNType); bool haveNode (SHAMapNode const& id, @@ -35,10 +36,11 @@ class AccountStateSF : public SHAMapSyncFilter public: explicit AccountStateSF (uint32 
ledgerSeq); + // Note that the nodeData is overwritten by this call void gotNode (bool fromFilter, SHAMapNode const& id, uint256 const& nodeHash, - Blob const& nodeData, + Blob& nodeData, SHAMapTreeNode::TNType); bool haveNode (SHAMapNode const& id, @@ -56,10 +58,11 @@ class TransactionStateSF : public SHAMapSyncFilter public: explicit TransactionStateSF (uint32 ledgerSeq); + // Note that the nodeData is overwritten by this call void gotNode (bool fromFilter, SHAMapNode const& id, uint256 const& nodeHash, - Blob const& nodeData, + Blob& nodeData, SHAMapTreeNode::TNType); bool haveNode (SHAMapNode const& id, diff --git a/src/cpp/ripple/ripple_SHAMapTreeNode.cpp b/src/cpp/ripple/ripple_SHAMapTreeNode.cpp index d9b4b7eb77..1d244f64ee 100644 --- a/src/cpp/ripple/ripple_SHAMapTreeNode.cpp +++ b/src/cpp/ripple/ripple_SHAMapTreeNode.cpp @@ -4,8 +4,14 @@ */ //============================================================================== -SHAMapTreeNode::SHAMapTreeNode (uint32 seq, const SHAMapNode& nodeID) : SHAMapNode (nodeID), mHash (0), - mSeq (seq), mAccessSeq (seq), mType (tnERROR), mIsBranch (0), mFullBelow (false) +SHAMapTreeNode::SHAMapTreeNode (uint32 seq, const SHAMapNode& nodeID) + : SHAMapNode (nodeID) + , mHash (uint64(0)) + , mSeq (seq) + , mAccessSeq (seq) + , mType (tnERROR) + , mIsBranch (0) + , mFullBelow (false) { } @@ -201,7 +207,7 @@ SHAMapTreeNode::SHAMapTreeNode (const SHAMapNode& id, Blob const& rawNode, uint3 if (hashValid) { mHash = hash; -#ifdef PARANOID +#if RIPPLE_VERIFY_NODEOBJECT_KEYS updateHash (); assert (mHash == hash); #endif @@ -219,7 +225,7 @@ bool SHAMapTreeNode::updateHash () if (mIsBranch != 0) { nh = Serializer::getPrefixHash (HashPrefix::innerNode, reinterpret_cast (mHashes), sizeof (mHashes)); -#ifdef PARANOID +#if RIPPLE_VERIFY_NODEOBJECT_KEYS Serializer s; s.add32 (HashPrefix::innerNode); diff --git a/src/cpp/ripple/ripple_TransactionAcquire.cpp b/src/cpp/ripple/ripple_TransactionAcquire.cpp index 26af9f9785..9202a5dc23 100644 
--- a/src/cpp/ripple/ripple_TransactionAcquire.cpp +++ b/src/cpp/ripple/ripple_TransactionAcquire.cpp @@ -20,9 +20,13 @@ TransactionAcquire::TransactionAcquire (uint256 const& hash) static void TACompletionHandler (uint256 hash, SHAMap::pointer map) { - boost::recursive_mutex::scoped_lock sl (getApp().getMasterLock ()); - getApp().getOPs ().mapComplete (hash, map); - getApp().getInboundLedgers ().dropLedger (hash); + { + Application::ScopedLockType lock (getApp ().getMasterLock (), __FILE__, __LINE__); + + getApp().getOPs ().mapComplete (hash, map); + + getApp().getInboundLedgers ().dropLedger (hash); + } } void TransactionAcquire::done () @@ -51,20 +55,18 @@ void TransactionAcquire::onTimer (bool progress, boost::recursive_mutex::scoped_ if (getTimeouts () > 10) { WriteLog (lsWARNING, TransactionAcquire) << "Ten timeouts on TX set " << getHash (); - { // FIXME: Acquire the master lock here can deadlock - psl.unlock(); - { - boost::recursive_mutex::scoped_lock sl (getApp().getMasterLock ()); + psl.unlock(); + { + Application::ScopedLockType lock (getApp().getMasterLock (), __FILE__, __LINE__); - if (getApp().getOPs ().stillNeedTXSet (mHash)) - { - WriteLog (lsWARNING, TransactionAcquire) << "Still need it"; - mTimeouts = 0; - aggressive = true; - } + if (getApp().getOPs ().stillNeedTXSet (mHash)) + { + WriteLog (lsWARNING, TransactionAcquire) << "Still need it"; + mTimeouts = 0; + aggressive = true; } - psl.lock(); } + psl.lock(); if (!aggressive) {