diff --git a/.gitignore b/.gitignore
index 6157c1787e..37b1e0a575 100644
--- a/.gitignore
+++ b/.gitignore
@@ -63,3 +63,7 @@ DerivedData
# Intel Parallel Studio 2013 XE
My Amplifier XE Results - RippleD
+
+# KeyvaDB files
+*.key
+*.val
diff --git a/BeastConfig.h b/BeastConfig.h
index 47adbde46b..3b82014da9 100644
--- a/BeastConfig.h
+++ b/BeastConfig.h
@@ -120,6 +120,19 @@
//#define BEAST_BIND_USES_TR1 1
//#define BEAST_BIND_USES_BOOST 1
-//#define BEAST_UNIT_TESTS 1
+//------------------------------------------------------------------------------
+//
+// Ripple compilation settings
+//
+//------------------------------------------------------------------------------
+
+/** Config: RIPPLE_VERIFY_NODEOBJECT_KEYS
+
+ This verifies that the hash of node objects matches the payload.
+ It is quite expensive so normally this is turned off!
+*/
+#ifndef RIPPLE_VERIFY_NODEOBJECT_KEYS
+//#define RIPPLE_VERIFY_NODEOBJECT_KEYS 1
+#endif
#endif
diff --git a/Builds/QtCreator/rippled.pro b/Builds/QtCreator/rippled.pro
index 89ebd28df4..9858ac395a 100644
--- a/Builds/QtCreator/rippled.pro
+++ b/Builds/QtCreator/rippled.pro
@@ -63,6 +63,7 @@ SOURCES += \
../../Subtrees/beast/modules/beast_basics/beast_basics.cpp \
../../Subtrees/beast/modules/beast_core/beast_core.cpp \
../../Subtrees/beast/modules/beast_crypto/beast_crypto.cpp \
+ ../../Subtrees/beast/modules/beast_db/beast_db.cpp \
../../modules/ripple_app/ripple_app_pt1.cpp \
../../modules/ripple_app/ripple_app_pt2.cpp \
../../modules/ripple_app/ripple_app_pt3.cpp \
diff --git a/Builds/VisualStudio2012/RippleD.vcxproj b/Builds/VisualStudio2012/RippleD.vcxproj
index e7e4b5088f..bdf45f6cc1 100644
--- a/Builds/VisualStudio2012/RippleD.vcxproj
+++ b/Builds/VisualStudio2012/RippleD.vcxproj
@@ -157,6 +157,24 @@
true
true
+
+ true
+ true
+ true
+ true
+
+
+ true
+ true
+ true
+ true
+
+
+ true
+ true
+ true
+ true
+
true
true
@@ -802,12 +820,6 @@
true
true
-
- true
- true
- true
- true
-
true
true
@@ -1027,6 +1039,7 @@
+
true
true
@@ -1402,6 +1415,9 @@
+
+
+
@@ -1441,6 +1457,7 @@
+
@@ -1732,7 +1749,7 @@
Disabled
- _CRTDBG_MAP_ALLOC;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)
+ WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)
ProgramDatabase
false
MultiThreadedDebug
diff --git a/Builds/VisualStudio2012/RippleD.vcxproj.filters b/Builds/VisualStudio2012/RippleD.vcxproj.filters
index 7d7ff164cf..beaa45d3af 100644
--- a/Builds/VisualStudio2012/RippleD.vcxproj.filters
+++ b/Builds/VisualStudio2012/RippleD.vcxproj.filters
@@ -594,9 +594,6 @@
[1] Ripple\ripple_app\_misc
-
- [1] Ripple\ripple_app\_misc
-
[1] Ripple\ripple_app\_misc
@@ -807,9 +804,6 @@
[1] Ripple\ripple_app\node
-
- [1] Ripple\ripple_app\node
-
[1] Ripple\ripple_mdb
@@ -897,6 +891,21 @@
[1] Ripple\ripple_app\node
+
+ [1] Ripple\ripple_app\node
+
+
+ [1] Ripple\ripple_app\node
+
+
+ [1] Ripple\ripple_app\node
+
+
+ [1] Ripple\ripple_app\node
+
+
+ [0] Subtrees\beast
+
@@ -963,9 +972,6 @@
[1] Ripple\ripple_basics\utility
-
- [1] Ripple\ripple_basics\utility
-
[1] Ripple\ripple_basics\utility
@@ -1581,9 +1587,6 @@
[1] Ripple\ripple_app\node
-
- [1] Ripple\ripple_app\node
-
[1] Ripple\ripple_mdb
@@ -1674,6 +1677,24 @@
[1] Ripple\ripple_app\node
+
+ [1] Ripple\ripple_app\node
+
+
+ [1] Ripple\ripple_app\node
+
+
+ [1] Ripple\ripple_app\node
+
+
+ [1] Ripple\ripple_app\node
+
+
+ [1] Ripple\ripple_basics\utility
+
+
+ [1] Ripple\ripple_core\functional
+
diff --git a/SConstruct b/SConstruct
index 903d5bf877..69461f7c6f 100644
--- a/SConstruct
+++ b/SConstruct
@@ -122,6 +122,7 @@ COMPILED_FILES = [
'Subtrees/beast/modules/beast_basics/beast_basics.cpp',
'Subtrees/beast/modules/beast_core/beast_core.cpp',
'Subtrees/beast/modules/beast_crypto/beast_crypto.cpp',
+ 'Subtrees/beast/modules/beast_db/beast_db.cpp',
'modules/ripple_app/ripple_app_pt1.cpp',
'modules/ripple_app/ripple_app_pt2.cpp',
'modules/ripple_app/ripple_app_pt3.cpp',
diff --git a/Subtrees/README.md b/Subtrees/README.md
index 51435b4def..457688b264 100644
--- a/Subtrees/README.md
+++ b/Subtrees/README.md
@@ -21,6 +21,19 @@ Branch
ripple-fork
```
+## LightningDB (a.k.a. MDB)
+
+A supposedly fast memory-mapped key value database system
+
+Repository
+```
+git://gitorious.org/mdb/mdb.git
+```
+Branch
+```
+mdb.master
+```
+
## websocket
Ripple's fork of websocketpp has some incompatible changes and Ripple specific includes.
diff --git a/Subtrees/beast/Builds/VisualStudio2012/BeastConfig.h b/Subtrees/beast/Builds/VisualStudio2012/BeastConfig.h
index ebf0d9a7bd..0a19d0402d 100644
--- a/Subtrees/beast/Builds/VisualStudio2012/BeastConfig.h
+++ b/Subtrees/beast/Builds/VisualStudio2012/BeastConfig.h
@@ -120,6 +120,4 @@
//#define BEAST_BIND_USES_TR1 1
//#define BEAST_BIND_USES_BOOST 1
-#define BEAST_UNIT_TESTS 1
-
#endif
diff --git a/Subtrees/beast/Builds/VisualStudio2012/beast.vcxproj b/Subtrees/beast/Builds/VisualStudio2012/beast.vcxproj
index 8309bb27d2..99c88c4500 100644
--- a/Subtrees/beast/Builds/VisualStudio2012/beast.vcxproj
+++ b/Subtrees/beast/Builds/VisualStudio2012/beast.vcxproj
@@ -78,6 +78,12 @@
true
true
+
+ true
+ true
+ true
+ true
+
@@ -134,12 +140,14 @@
+
+
@@ -162,6 +170,7 @@
+
@@ -246,6 +255,8 @@
+
+
@@ -407,6 +418,12 @@
true
true
+
+ true
+ true
+ true
+ true
+
true
true
@@ -437,6 +454,12 @@
true
true
+
+ true
+ true
+ true
+ true
+
true
true
@@ -918,6 +941,13 @@
true
true
+
+
+ true
+ true
+ true
+ true
+
diff --git a/Subtrees/beast/Builds/VisualStudio2012/beast.vcxproj.filters b/Subtrees/beast/Builds/VisualStudio2012/beast.vcxproj.filters
index 7b777f5085..7f02acb998 100644
--- a/Subtrees/beast/Builds/VisualStudio2012/beast.vcxproj.filters
+++ b/Subtrees/beast/Builds/VisualStudio2012/beast.vcxproj.filters
@@ -36,6 +36,9 @@
beast_basics
+
+ beast_db
+
@@ -125,6 +128,12 @@
{1170f2bc-2456-410a-ab2b-c45f6ed37b9e}
+
+ {4834218f-f13f-41bc-a8a0-50314a3a99a3}
+
+
+ {15a98fee-1b52-45eb-9480-514b8750d755}
+
@@ -623,6 +632,21 @@
beast_core\containers
+
+ beast_core\files
+
+
+ beast_core\diagnostic
+
+
+ beast_core\memory
+
+
+ beast_db
+
+
+ beast_db\keyvalue
+
@@ -967,6 +991,18 @@
beast_crypto\math
+
+ beast_core\files
+
+
+ beast_core\diagnostic
+
+
+ beast_db
+
+
+ beast_db\keyvalue
+
diff --git a/Subtrees/beast/TODO.txt b/Subtrees/beast/TODO.txt
index a5faea1456..e4a9e8f073 100644
--- a/Subtrees/beast/TODO.txt
+++ b/Subtrees/beast/TODO.txt
@@ -2,6 +2,12 @@
BEAST TODO
--------------------------------------------------------------------------------
+- Specialize UnsignedInteger<> for performance in the storage format
+
+- Macro for acquiring a ScopedLock that records file and line.
+
+- Rename HeapBlock routines to not conflict with _CRTDBG_MAP_ALLOC macros
+
- Design a WeakPtr / SharedPtr / SharedObject intrusive system
- Implement beast::Bimap?
diff --git a/Subtrees/beast/modules/beast_core/beast_core.cpp b/Subtrees/beast/modules/beast_core/beast_core.cpp
index 82966a182e..f2387873fe 100644
--- a/Subtrees/beast/modules/beast_core/beast_core.cpp
+++ b/Subtrees/beast/modules/beast_core/beast_core.cpp
@@ -149,12 +149,14 @@ namespace beast
#include "diagnostic/beast_FPUFlags.cpp"
#include "diagnostic/beast_LeakChecked.cpp"
#include "diagnostic/beast_UnitTest.cpp"
+#include "diagnostic/beast_UnitTestUtilities.cpp"
#include "files/beast_DirectoryIterator.cpp"
#include "files/beast_File.cpp"
#include "files/beast_FileInputStream.cpp"
#include "files/beast_FileOutputStream.cpp"
#include "files/beast_FileSearchPath.cpp"
+#include "files/beast_RandomAccessFile.cpp"
#include "files/beast_TemporaryFile.cpp"
#include "json/beast_JSON.cpp"
diff --git a/Subtrees/beast/modules/beast_core/beast_core.h b/Subtrees/beast/modules/beast_core/beast_core.h
index a0d9a3042f..c19c149f27 100644
--- a/Subtrees/beast/modules/beast_core/beast_core.h
+++ b/Subtrees/beast/modules/beast_core/beast_core.h
@@ -226,6 +226,7 @@ namespace beast
#include "diagnostic/beast_Error.h"
#include "diagnostic/beast_FPUFlags.h"
#include "diagnostic/beast_UnitTest.h"
+#include "diagnostic/beast_UnitTestUtilities.h"
#include "diagnostic/beast_Throw.h"
#include "containers/beast_AbstractFifo.h"
#include "containers/beast_Array.h"
@@ -252,6 +253,7 @@ namespace beast
#include "files/beast_FileOutputStream.h"
#include "files/beast_FileSearchPath.h"
#include "files/beast_MemoryMappedFile.h"
+#include "files/beast_RandomAccessFile.h"
#include "files/beast_TemporaryFile.h"
#include "json/beast_JSON.h"
#include "logging/beast_FileLogger.h"
@@ -274,6 +276,7 @@ namespace beast
#include "memory/beast_WeakReference.h"
#include "memory/beast_MemoryAlignment.h"
#include "memory/beast_CacheLine.h"
+#include "memory/beast_RecycledObjectPool.h"
#include "misc/beast_Result.h"
#include "misc/beast_Uuid.h"
#include "misc/beast_WindowsRegistry.h"
diff --git a/Subtrees/beast/modules/beast_core/containers/beast_AbstractFifo.cpp b/Subtrees/beast/modules/beast_core/containers/beast_AbstractFifo.cpp
index 0e3dd1236d..32f3ff5f70 100644
--- a/Subtrees/beast/modules/beast_core/containers/beast_AbstractFifo.cpp
+++ b/Subtrees/beast/modules/beast_core/containers/beast_AbstractFifo.cpp
@@ -129,7 +129,7 @@ void AbstractFifo::finishedRead (int numRead) noexcept
class AbstractFifoTests : public UnitTest
{
public:
- AbstractFifoTests() : UnitTest ("Abstract Fifo")
+ AbstractFifoTests() : UnitTest ("Abstract Fifo", "beast")
{
}
@@ -224,6 +224,4 @@ public:
}
};
-#if BEAST_UNIT_TESTS
static AbstractFifoTests abstractFifoTests;
-#endif
diff --git a/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTest.cpp b/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTest.cpp
index 41903dc311..8905ed2a12 100644
--- a/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTest.cpp
+++ b/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTest.cpp
@@ -21,8 +21,13 @@
*/
//==============================================================================
-UnitTest::UnitTest (const String& name_)
- : name (name_), runner (nullptr)
+UnitTest::UnitTest (String const& name,
+ String const& group,
+ When when)
+ : m_name (name)
+ , m_group (group)
+ , m_when (when)
+ , m_runner (nullptr)
{
getAllTests().add (this);
}
@@ -32,19 +37,25 @@ UnitTest::~UnitTest()
getAllTests().removeFirstMatchingValue (this);
}
-Array& UnitTest::getAllTests()
+UnitTest::TestList& UnitTest::getAllTests()
{
- static Array tests;
- return tests;
+ static TestList s_tests;
+
+ return s_tests;
}
-void UnitTest::initialise() {}
-void UnitTest::shutdown() {}
-
-void UnitTest::performTest (UnitTests* const runner_)
+void UnitTest::initialise()
{
- bassert (runner_ != nullptr);
- runner = runner_;
+}
+
+void UnitTest::shutdown()
+{
+}
+
+void UnitTest::performTest (UnitTests* const runner)
+{
+ bassert (runner != nullptr);
+ m_runner = runner;
initialise();
runTest();
@@ -53,23 +64,24 @@ void UnitTest::performTest (UnitTests* const runner_)
void UnitTest::logMessage (const String& message)
{
- runner->logMessage (message);
+ m_runner->logMessage (message);
}
void UnitTest::beginTest (const String& testName)
{
- runner->beginNewTest (this, testName);
+ m_runner->beginNewTest (this, testName);
}
void UnitTest::expect (const bool result, const String& failureMessage)
{
if (result)
- runner->addPass();
+ m_runner->addPass();
else
- runner->addFail (failureMessage);
+ m_runner->addFail (failureMessage);
}
//==============================================================================
+
UnitTests::UnitTests()
: currentTest (nullptr),
assertOnFailure (true),
@@ -105,8 +117,52 @@ void UnitTests::resultsUpdated()
{
}
-void UnitTests::runTests (const Array& tests)
+void UnitTests::runTest (UnitTest& test)
{
+ try
+ {
+ test.performTest (this);
+ }
+ catch (std::exception& e)
+ {
+ String s;
+ s << "Got an exception: " << e.what ();
+ addFail (s);
+ }
+ catch (...)
+ {
+ addFail ("Got an unhandled exception");
+ }
+}
+
+void UnitTests::runTest (String const& name)
+{
+ results.clear();
+ resultsUpdated();
+
+ UnitTest::TestList& tests (UnitTest::getAllTests ());
+
+ for (int i = 0; i < tests.size(); ++i)
+ {
+ UnitTest& test = *tests [i];
+
+ if (test.getGroup () == name && test.getWhen () == UnitTest::runAlways)
+ {
+ runTest (test);
+ }
+ else if (test.getName () == name)
+ {
+ runTest (test);
+ break;
+ }
+
+ }
+}
+
+void UnitTests::runAllTests ()
+{
+ UnitTest::TestList& tests (UnitTest::getAllTests ());
+
results.clear();
resultsUpdated();
@@ -115,22 +171,14 @@ void UnitTests::runTests (const Array& tests)
if (shouldAbortTests())
break;
- try
- {
- tests.getUnchecked(i)->performTest (this);
- }
- catch (...)
- {
- addFail ("An unhandled exception was thrown!");
- }
+ UnitTest& test = *tests [i];
+
+ if (test.getWhen () == UnitTest::runAlways)
+ runTest (test);
}
endTest();
-}
-void UnitTests::runAllTests()
-{
- runTests (UnitTest::getAllTests());
}
void UnitTests::logMessage (const String& message)
@@ -150,14 +198,14 @@ void UnitTests::beginNewTest (UnitTest* const test, const String& subCategory)
TestResult* const r = new TestResult();
results.add (r);
- r->unitTestName = test->getName();
+ r->unitTestName = test->getGroup() + "::" + test->getName();
r->subcategoryName = subCategory;
r->passes = 0;
r->failures = 0;
- logMessage ("Test: " + r->unitTestName + "/" + subCategory + "...");
+ logMessage ("Test '" + r->unitTestName + "': " + subCategory);
- resultsUpdated();
+ resultsUpdated ();
}
void UnitTests::endTest()
@@ -214,8 +262,8 @@ void UnitTests::addFail (const String& failureMessage)
r->failures++;
- String message ("!!! Test ");
- message << (r->failures + r->passes) << " failed";
+ String message ("Failure, #");
+ message << (r->failures + r->passes);
if (failureMessage.isNotEmpty())
message << ": " << failureMessage;
diff --git a/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTest.h b/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTest.h
index 237f1ba89e..f7e8466c18 100644
--- a/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTest.h
+++ b/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTest.h
@@ -28,7 +28,6 @@
#include "../containers/beast_OwnedArray.h"
class UnitTests;
-
/** This is a base class for classes that perform a unit test.
To write a test using this class, your code should look something like this:
@@ -56,9 +55,10 @@ class UnitTests;
}
};
- // Explicit template instantiation is required to make the unit
- // test get automatically added to the set of unit tests.
- template class UnitTestType ;
+ // This makes the unit test available in the global list
+ // It doesn't have to be static.
+ //
+ static MyTest myTest;
@endcode
@@ -69,15 +69,38 @@ class UnitTests;
class BEAST_API UnitTest : Uncopyable
{
public:
+ enum When
+ {
+ runAlways,
+ runManual
+ };
+
+ /** The type of a list of tests.
+ */
+ typedef Array TestList;
+
//==============================================================================
- /** Creates a test with the given name. */
- explicit UnitTest (String const& name);
+ /** Creates a test with the given name, group, and run option.
+
+ The group is used when you want to run all tests in a particular group
+ instead of all tests in general. The run option allows you to write some
+ tests that are only available manually. For example, a performance unit
+ test that takes a long time which you might not want to run every time
+ you run all tests.
+ */
+ explicit UnitTest (String const& name, String const& group = "", When when = runAlways);
/** Destructor. */
virtual ~UnitTest();
/** Returns the name of the test. */
- const String& getName() const noexcept { return name; }
+ const String& getName() const noexcept { return m_name; }
+
+ /** Returns the group of the test. */
+ String const& getGroup () const noexcept { return m_group; }
+
+ /** Returns the run option of the test. */
+ When getWhen () const noexcept { return m_when; }
/** Runs the test, using the specified UnitTests.
You shouldn't need to call this method directly - use
@@ -86,7 +109,7 @@ public:
void performTest (UnitTests* runner);
/** Returns the set of all UnitTest objects that currently exist. */
- static Array& getAllTests();
+ static TestList& getAllTests();
//==============================================================================
/** You can optionally implement this method to set up your test.
@@ -155,14 +178,16 @@ public:
//==============================================================================
/** Writes a message to the test log.
- This can only be called from within your runTest() method.
+ This can only be called during your runTest() method.
*/
void logMessage (const String& message);
private:
//==============================================================================
- const String name;
- UnitTests* runner;
+ String const m_name;
+ String const m_group;
+ When const m_when;
+ UnitTests* m_runner;
};
//==============================================================================
@@ -187,12 +212,14 @@ public:
/** Destructor. */
virtual ~UnitTests();
- /** Runs a set of tests.
-
- The tests are performed in order, and the results are logged. To run all the
- registered UnitTest objects that exist, use runAllTests().
+ /** Run the specified unit test.
+
+ Subclasses can override this to do extra stuff.
*/
- void runTests (const Array& tests);
+ virtual void runTest (UnitTest& test);
+
+ /** Run a particular test or group. */
+ void runTest (String const& name);
/** Runs all the UnitTest objects that currently exist.
This calls runTests() for all the objects listed in UnitTest::getAllTests().
diff --git a/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTestUtilities.cpp b/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTestUtilities.cpp
new file mode 100644
index 0000000000..0a553cffa4
--- /dev/null
+++ b/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTestUtilities.cpp
@@ -0,0 +1,56 @@
+//------------------------------------------------------------------------------
+/*
+ This file is part of Beast: https://github.com/vinniefalco/Beast
+ Copyright 2013, Vinnie Falco
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+//==============================================================================
+
+class UnitTestUtilitiesTests : public UnitTest
+{
+public:
+ UnitTestUtilitiesTests () : UnitTest ("UnitTestUtilities", "beast")
+ {
+ }
+
+ void testPayload ()
+ {
+ using namespace UnitTestUtilities;
+
+ int const maxBufferSize = 4000;
+ int const minimumBytes = 1;
+ int const numberOfItems = 100;
+ int64 const seedValue = 50;
+
+ beginTest ("Payload");
+
+ Payload p1 (maxBufferSize);
+ Payload p2 (maxBufferSize);
+
+ for (int i = 0; i < numberOfItems; ++i)
+ {
+ p1.repeatableRandomFill (minimumBytes, maxBufferSize, seedValue);
+ p2.repeatableRandomFill (minimumBytes, maxBufferSize, seedValue);
+
+ expect (p1 == p2, "Should be equal");
+ }
+ }
+
+ void runTest ()
+ {
+ testPayload ();
+ }
+};
+
+static UnitTestUtilitiesTests unitTestUtilitiesTests;
diff --git a/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTestUtilities.h b/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTestUtilities.h
new file mode 100644
index 0000000000..b2fa7792c0
--- /dev/null
+++ b/Subtrees/beast/modules/beast_core/diagnostic/beast_UnitTestUtilities.h
@@ -0,0 +1,100 @@
+//------------------------------------------------------------------------------
+/*
+ This file is part of Beast: https://github.com/vinniefalco/Beast
+ Copyright 2013, Vinnie Falco
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+//==============================================================================
+
+#ifndef BEAST_UNITTESTUTILITIES_H_INCLUDED
+#define BEAST_UNITTESTUTILITIES_H_INCLUDED
+
+#include "../maths/beast_Random.h"
+
+namespace UnitTestUtilities
+{
+
+/** Fairly shuffle an array pseudo-randomly.
+*/
+template
+void repeatableShuffle (int const numberOfItems, T& arrayOfItems, int64 seedValue)
+{
+ Random r (seedValue);
+
+ for (int i = numberOfItems - 1; i > 0; --i)
+ {
+ int const choice = r.nextInt (i + 1);
+
+ std::swap (arrayOfItems [i], arrayOfItems [choice]);
+ }
+}
+
+/** A block of memory used for test data.
+*/
+struct Payload
+{
+ /** Construct a payload with a buffer of the specified maximum size.
+
+ @param maximumBytes The size of the buffer, in bytes.
+ */
+ explicit Payload (int maxBufferSize)
+ : bufferSize (maxBufferSize)
+ , data (maxBufferSize)
+ {
+ }
+
+ /** Generate a random block of data within a certain size range.
+
+ @param minimumBytes The smallest number of bytes in the resulting payload.
+ @param maximumBytes The largest number of bytes in the resulting payload.
+ @param seedValue The value to seed the random number generator with.
+ */
+ void repeatableRandomFill (int minimumBytes, int maximumBytes, int64 seedValue) noexcept
+ {
+ bassert (minimumBytes >=0 && maximumBytes <= bufferSize);
+
+ Random r (seedValue);
+
+ bytes = minimumBytes + r.nextInt (1 + maximumBytes - minimumBytes);
+
+ bassert (bytes >= minimumBytes && bytes <= bufferSize);
+
+ for (int i = 0; i < bytes; ++i)
+ data [i] = static_cast (r.nextInt ());
+ }
+
+ /** Compare two payloads for equality.
+ */
+ bool operator== (Payload const& other) const noexcept
+ {
+ if (bytes == other.bytes)
+ {
+ return memcmp (data.getData (), other.data.getData (), bytes) == 0;
+ }
+ else
+ {
+ return false;
+ }
+ }
+
+public:
+ int const bufferSize;
+
+ int bytes;
+ HeapBlock data;
+};
+
+}
+
+#endif
diff --git a/Subtrees/beast/modules/beast_core/files/beast_File.cpp b/Subtrees/beast/modules/beast_core/files/beast_File.cpp
index 7ef709e324..55bc1cafbd 100644
--- a/Subtrees/beast/modules/beast_core/files/beast_File.cpp
+++ b/Subtrees/beast/modules/beast_core/files/beast_File.cpp
@@ -926,7 +926,7 @@ MemoryMappedFile::MemoryMappedFile (const File& file, const Range& fileRa
class FileTests : public UnitTest
{
public:
- FileTests() : UnitTest ("File") {}
+ FileTests() : UnitTest ("File", "beast") {}
void runTest()
{
@@ -1106,7 +1106,5 @@ public:
}
};
-#if BEAST_UNIT_TESTS
static FileTests fileTests;
-#endif
diff --git a/Subtrees/beast/modules/beast_core/files/beast_FileOutputStream.cpp b/Subtrees/beast/modules/beast_core/files/beast_FileOutputStream.cpp
index 368eb9c438..a3a0e5e3c6 100644
--- a/Subtrees/beast/modules/beast_core/files/beast_FileOutputStream.cpp
+++ b/Subtrees/beast/modules/beast_core/files/beast_FileOutputStream.cpp
@@ -114,7 +114,7 @@ bool FileOutputStream::write (const void* const src, const size_t numBytes)
return true;
}
-void FileOutputStream::writeRepeatedByte (uint8 byte, size_t numBytes)
+bool FileOutputStream::writeRepeatedByte (uint8 byte, size_t numBytes)
{
bassert (((ssize_t) numBytes) >= 0);
@@ -123,9 +123,8 @@ void FileOutputStream::writeRepeatedByte (uint8 byte, size_t numBytes)
memset (buffer + bytesInBuffer, byte, numBytes);
bytesInBuffer += numBytes;
currentPosition += numBytes;
+ return true;
}
- else
- {
- OutputStream::writeRepeatedByte (byte, numBytes);
- }
-}
+
+ return OutputStream::writeRepeatedByte (byte, numBytes);
+}
\ No newline at end of file
diff --git a/Subtrees/beast/modules/beast_core/files/beast_FileOutputStream.h b/Subtrees/beast/modules/beast_core/files/beast_FileOutputStream.h
index 5f358ecd63..e4110492c9 100644
--- a/Subtrees/beast/modules/beast_core/files/beast_FileOutputStream.h
+++ b/Subtrees/beast/modules/beast_core/files/beast_FileOutputStream.h
@@ -27,7 +27,6 @@
#include "beast_File.h"
#include "../streams/beast_OutputStream.h"
-
//==============================================================================
/**
An output stream that writes into a local file.
@@ -87,11 +86,11 @@ public:
Result truncate();
//==============================================================================
- void flush();
- int64 getPosition();
- bool setPosition (int64 pos);
- bool write (const void* data, size_t numBytes);
- void writeRepeatedByte (uint8 byte, size_t numTimesToRepeat);
+ void flush() override;
+ int64 getPosition() override;
+ bool setPosition (int64) override;
+ bool write (const void*, size_t) override;
+ bool writeRepeatedByte (uint8 byte, size_t numTimesToRepeat) override;
private:
@@ -111,4 +110,4 @@ private:
ssize_t writeInternal (const void*, size_t);
};
-#endif // BEAST_FILEOUTPUTSTREAM_BEASTHEADER
+#endif
\ No newline at end of file
diff --git a/Subtrees/beast/modules/beast_core/files/beast_RandomAccessFile.cpp b/Subtrees/beast/modules/beast_core/files/beast_RandomAccessFile.cpp
new file mode 100644
index 0000000000..28c029cd8b
--- /dev/null
+++ b/Subtrees/beast/modules/beast_core/files/beast_RandomAccessFile.cpp
@@ -0,0 +1,272 @@
+//------------------------------------------------------------------------------
+/*
+ This file is part of Beast: https://github.com/vinniefalco/Beast
+ Copyright 2013, Vinnie Falco
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+//==============================================================================
+
+RandomAccessFile::RandomAccessFile () noexcept
+ : fileHandle (nullptr)
+ , currentPosition (0)
+{
+}
+
+RandomAccessFile::~RandomAccessFile ()
+{
+ close ();
+}
+
+Result RandomAccessFile::open (File const& path, Mode mode)
+{
+ close ();
+
+ return nativeOpen (path, mode);
+}
+
+void RandomAccessFile::close ()
+{
+ if (isOpen ())
+ {
+ nativeFlush ();
+ nativeClose ();
+ }
+}
+
+Result RandomAccessFile::setPosition (FileOffset newPosition)
+{
+ if (newPosition != currentPosition)
+ {
+ // VFALCO NOTE I dislike return from the middle but
+ // Result::ok() is showing up in the profile
+ //
+ return nativeSetPosition (newPosition);
+ }
+
+ return Result::ok ();
+}
+
+Result RandomAccessFile::read (void* buffer, ByteCount numBytes, ByteCount* pActualAmount)
+{
+ return nativeRead (buffer, numBytes, pActualAmount);
+}
+
+Result RandomAccessFile::write (const void* data, ByteCount numBytes, ByteCount* pActualAmount)
+{
+ bassert (data != nullptr && ((ssize_t) numBytes) >= 0);
+
+ Result result (Result::ok ());
+
+ ByteCount amountWritten = 0;
+
+ result = nativeWrite (data, numBytes, &amountWritten);
+
+ if (result.wasOk ())
+ currentPosition += amountWritten;
+
+ if (pActualAmount != nullptr)
+ *pActualAmount = amountWritten;
+
+ return result;
+}
+
+Result RandomAccessFile::truncate ()
+{
+ Result result = flush ();
+
+ if (result.wasOk ())
+ result = nativeTruncate ();
+
+ return result;
+}
+
+Result RandomAccessFile::flush ()
+{
+ return nativeFlush ();
+}
+
+//------------------------------------------------------------------------------
+
+class RandomAccessFileTests : public UnitTest
+{
+public:
+ RandomAccessFileTests () : UnitTest ("RandomAccessFile", "beast")
+ {
+ }
+
+ enum
+ {
+ maxPayload = 8192
+ };
+
+ /* For this test we will create a file which consists of a fixed
+ number of variable length records. Each record is numbered sequentially
+ starting at 0. To calculate the position of each record we first build
+ a table of size/offset pairs using a pseudorandom number generator.
+ */
+ struct Record
+ {
+ int index;
+ int bytes;
+ int offset;
+ };
+
+ typedef HeapBlock Records;
+
+ // Produce the pseudo-random set of records.
+ static void createRecords (HeapBlock & records,
+ int numRecords,
+ int maxBytes,
+ int64 seedValue)
+ {
+ using namespace UnitTestUtilities;
+
+ Random r (seedValue);
+
+ records.malloc (numRecords);
+
+ int offset = 0;
+
+ for (int i = 0; i < numRecords; ++i)
+ {
+ int const bytes = r.nextInt (maxBytes) + 1;
+
+ records [i].index = i;
+ records [i].bytes = bytes;
+ records [i].offset = offset;
+
+ offset += bytes;
+ }
+
+ repeatableShuffle (numRecords, records, seedValue);
+ }
+
+ // Write all the records to the file.
+ // The payload is pseudo-randomly generated.
+ void writeRecords (RandomAccessFile& file,
+ int numRecords,
+ HeapBlock const& records,
+ int64 seedValue)
+ {
+ using namespace UnitTestUtilities;
+
+ for (int i = 0; i < numRecords; ++i)
+ {
+ Payload p (records [i].bytes);
+
+ p.repeatableRandomFill (records [i].bytes,
+ records [i].bytes,
+ records [i].index + seedValue);
+
+ file.setPosition (records [i].offset);
+
+ Result result = file.write (p.data.getData (), p.bytes);
+
+ expect (result.wasOk (), "Should be ok");
+ }
+ }
+
+ // Read the records and verify the consistency.
+ void readRecords (RandomAccessFile& file,
+ int numRecords,
+ HeapBlock const& records,
+ int64 seedValue)
+ {
+ using namespace UnitTestUtilities;
+
+ for (int i = 0; i < numRecords; ++i)
+ {
+ Record const& record (records [i]);
+
+ int const bytes = record.bytes;
+
+ Payload p1 (bytes);
+ Payload p2 (bytes);
+
+ p1.repeatableRandomFill (bytes, bytes, record.index + seedValue);
+
+ file.setPosition (record.offset);
+
+ Result result = file.read (p2.data.getData (), bytes);
+
+ expect (result.wasOk (), "Should be ok");
+
+ if (result.wasOk ())
+ {
+ p2.bytes = bytes;
+
+ expect (p1 == p2, "Should be equal");
+ }
+ }
+ }
+
+ // Perform the test at the given buffer size.
+ void testFile (int const numRecords)
+ {
+ using namespace UnitTestUtilities;
+
+ int const seedValue = 50;
+
+ beginTest (String ("numRecords=") + String (numRecords));
+
+ // Calculate the path
+ File const path (File::createTempFile ("RandomAccessFile"));
+
+ // Create a predictable set of records
+ HeapBlock records (numRecords);
+ createRecords (records, numRecords, maxPayload, seedValue);
+
+ Result result (Result::ok ());
+
+ {
+ // Create the file
+ RandomAccessFile file;
+ result = file.open (path, RandomAccessFile::readWrite);
+ expect (result.wasOk (), "Should be ok");
+
+ if (result.wasOk ())
+ {
+ writeRecords (file, numRecords, records, seedValue);
+
+ readRecords (file, numRecords, records, seedValue);
+
+ repeatableShuffle (numRecords, records, seedValue);
+
+ readRecords (file, numRecords, records, seedValue);
+ }
+ }
+
+ if (result.wasOk ())
+ {
+ // Re-open the file in read only mode
+ RandomAccessFile file;
+ result = file.open (path, RandomAccessFile::readOnly);
+ expect (result.wasOk (), "Should be ok");
+
+ if (result.wasOk ())
+ {
+ readRecords (file, numRecords, records, seedValue);
+ }
+ }
+ }
+
+ void runTest ()
+ {
+ testFile (10000);
+ }
+
+private:
+};
+
+static RandomAccessFileTests randomAccessFileTests;
diff --git a/Subtrees/beast/modules/beast_core/files/beast_RandomAccessFile.h b/Subtrees/beast/modules/beast_core/files/beast_RandomAccessFile.h
new file mode 100644
index 0000000000..2b7c9505c6
--- /dev/null
+++ b/Subtrees/beast/modules/beast_core/files/beast_RandomAccessFile.h
@@ -0,0 +1,197 @@
+//------------------------------------------------------------------------------
+/*
+ This file is part of Beast: https://github.com/vinniefalco/Beast
+ Copyright 2013, Vinnie Falco
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+//==============================================================================
+
+#ifndef BEAST_RANDOMACCESSFILE_H_INCLUDED
+#define BEAST_RANDOMACCESSFILE_H_INCLUDED
+
+#include "../misc/beast_Result.h"
+
+/** Provides random access reading and writing to an operating system file.
+
+ This class wraps the underlying native operating system routines for
+ opening and closing a file for reading and/or writing, seeking within
+ the file, and performing read and write operations. There are also methods
+ provided for obtaining an input or output stream which will work with
+ the file.
+
+ @note All files are opened in binary mode. No text newline conversions
+ are performed.
+
+ @note None of these members are thread safe. The caller is responsible
+ for synchronization.
+
+ @see FileInputStream, FileOutputStream
+*/
+class BEAST_API RandomAccessFile : Uncopyable, LeakChecked
+{
+public:
+ /** The type of an FileOffset.
+
+ This can be useful when writing templates.
+ */
+ typedef int64 FileOffset;
+
+ /** The type of a byte count.
+
+ This can be useful when writing templates.
+ */
+ typedef size_t ByteCount;
+
+ /** The access mode.
+
+ @see open
+ */
+ enum Mode
+ {
+ readOnly,
+ readWrite
+ };
+
+ //==============================================================================
+ /** Creates an unopened file object.
+
+ @see open, isOpen
+ */
+ RandomAccessFile () noexcept;
+
+ /** Destroy the file object.
+
+ If the operating system file is open it will be closed.
+ */
+ ~RandomAccessFile ();
+
+ /** Determine if a file is open.
+
+ @return `true` if the operating system file is open.
+ */
+ bool isOpen () const noexcept { return fileHandle != nullptr; }
+
+ /** Opens a file object.
+
+ The file is opened with the specified permissions. The initial
+ position is set to the beginning of the file.
+
+ @note If a file is already open, it will be closed first.
+
+ @param path The path to the file
+ @param mode The access permissions
+ @return An indication of the success of the operation.
+
+ @see Mode
+ */
+ Result open (File const& path, Mode mode);
+
+ /** Closes the file object.
+
+ Any data that needs to be flushed will be written before the file is closed.
+
+ @note If no file is opened, this call does nothing.
+ */
+ void close ();
+
+ /** Retrieve the @ref File associated with this object.
+
+ @return The associated @ref File.
+ */
+ File const& getFile () const noexcept { return file; }
+
+ /** Get the current position.
+
+ The next read or write will take place from here.
+
+ @return The current position, as an absolute byte FileOffset from the beginning.
+ */
+ FileOffset getPosition () const noexcept { return currentPosition; }
+
+ /** Set the current position.
+
+ The next read or write will take place at this location.
+
+ @param newPosition The byte FileOffset from the beginning of the file to move to.
+
+ @return `true` if the operation was successful.
+ */
+ Result setPosition (FileOffset newPosition);
+
+ /** Read data at the current position.
+
+ The caller is responsible for making sure that the memory pointed to
+ by `buffer` is at least as large as `bytesToRead`.
+
+ @note The file must have been opened with read permission.
+
+ @param buffer The memory to store the incoming data
+ @param numBytes The number of bytes to read.
+ @param pActualAmount Pointer to store the actual amount read, or `nullptr`.
+
+ @return `true` if all the bytes were read.
+ */
+ Result read (void* buffer, ByteCount numBytes, ByteCount* pActualAmount = 0);
+
+ /** Write data at the current position.
+
+ The current position is advanced past the data written. If data is
+ written past the end of the file, the file size is increased on disk.
+
+ The caller is responsible for making sure that the memory pointed to
+ by `buffer` is at least as large as `bytesToWrite`.
+
+ @note The file must have been opened with write permission.
+
+ @param data A pointer to the data buffer to write to the file.
+ @param numBytes The number of bytes to write.
+ @param pActualAmount Pointer to store the actual amount written, or `nullptr`.
+
+ @return `true` if all the data was written.
+ */
+ Result write (const void* data, ByteCount numBytes, ByteCount* pActualAmount = 0);
+
+ /** Truncate the file at the current position.
+ */
+ Result truncate ();
+
+ /** Flush the output buffers.
+
+ This calls the operating system to make sure all data has been written.
+ */
+ Result flush();
+
+ //==============================================================================
+private:
+ // Some of these methods are implemented natively on
+ // the corresponding platform.
+ //
+ // See beast_posix_SharedCode.h and beast_win32_Files.cpp
+ //
+ Result nativeOpen (File const& path, Mode mode);
+ void nativeClose ();
+ Result nativeSetPosition (FileOffset newPosition);
+ Result nativeRead (void* buffer, ByteCount numBytes, ByteCount* pActualAmount = 0);
+ Result nativeWrite (const void* data, ByteCount numBytes, ByteCount* pActualAmount = 0);
+ Result nativeTruncate ();
+ Result nativeFlush ();
+
+private:
+ File file;
+ void* fileHandle;
+ FileOffset currentPosition;
+};
+
+#endif
+
diff --git a/Subtrees/beast/modules/beast_core/json/beast_JSON.cpp b/Subtrees/beast/modules/beast_core/json/beast_JSON.cpp
index d712896a1f..216bdf7741 100644
--- a/Subtrees/beast/modules/beast_core/json/beast_JSON.cpp
+++ b/Subtrees/beast/modules/beast_core/json/beast_JSON.cpp
@@ -535,7 +535,7 @@ void JSON::writeToStream (OutputStream& output, const var& data, const bool allO
class JSONTests : public UnitTest
{
public:
- JSONTests() : UnitTest ("JSON") { }
+ JSONTests() : UnitTest ("JSON", "beast") { }
static String createRandomWideCharString (Random& r)
{
@@ -639,6 +639,4 @@ public:
}
};
-#if BEAST_UNIT_TESTS
static JSONTests jsonTests;
-#endif
diff --git a/Subtrees/beast/modules/beast_core/maths/beast_Random.cpp b/Subtrees/beast/modules/beast_core/maths/beast_Random.cpp
index ce3199699c..9f381e8479 100644
--- a/Subtrees/beast/modules/beast_core/maths/beast_Random.cpp
+++ b/Subtrees/beast/modules/beast_core/maths/beast_Random.cpp
@@ -24,6 +24,7 @@
Random::Random (const int64 seedValue) noexcept
: seed (seedValue)
{
+ nextInt (); // fixes a bug where the first int is always 0
}
Random::Random()
@@ -39,6 +40,8 @@ Random::~Random() noexcept
void Random::setSeed (const int64 newSeed) noexcept
{
seed = newSeed;
+
+ nextInt (); // fixes a bug where the first int is always 0
}
void Random::combineSeed (const int64 seedValue) noexcept
@@ -56,6 +59,8 @@ void Random::setSeedRandomly()
combineSeed (Time::getHighResolutionTicksPerSecond());
combineSeed (Time::currentTimeMillis());
globalSeed ^= seed;
+
+ nextInt (); // fixes a bug where the first int is always 0
}
Random& Random::getSystemRandom() noexcept
@@ -98,6 +103,23 @@ double Random::nextDouble() noexcept
return static_cast (nextInt()) / (double) 0xffffffff;
}
+void Random::nextBlob (void* buffer, size_t bytes)
+{
+ int const remainder = bytes % sizeof (int64);
+
+ {
+ int64* dest = static_cast <int64*> (buffer);
+ for (int i = bytes / sizeof (int64); i > 0; --i)
+ *dest++ = nextInt64 ();
+ buffer = dest;
+ }
+
+ {
+ int64 const val = nextInt64 ();
+ memcpy (buffer, &val, remainder);
+ }
+}
+
BigInteger Random::nextLargeNumber (const BigInteger& maximumValue)
{
BigInteger n;
@@ -137,7 +159,7 @@ void Random::fillBitsRandomly (BigInteger& arrayToChange, int startBit, int numB
class RandomTests : public UnitTest
{
public:
- RandomTests() : UnitTest ("Random") {}
+ RandomTests() : UnitTest ("Random", "beast") {}
void runTest()
{
@@ -165,6 +187,4 @@ public:
}
};
-#if BEAST_UNIT_TESTS
static RandomTests randomTests;
-#endif
diff --git a/Subtrees/beast/modules/beast_core/maths/beast_Random.h b/Subtrees/beast/modules/beast_core/maths/beast_Random.h
index f35c0eed92..1e68b1959c 100644
--- a/Subtrees/beast/modules/beast_core/maths/beast_Random.h
+++ b/Subtrees/beast/modules/beast_core/maths/beast_Random.h
@@ -89,6 +89,10 @@ public:
*/
bool nextBool() noexcept;
+ /** Fills a piece of memory with random data.
+ */
+ void nextBlob (void* buffer, size_t bytes);
+
/** Returns a BigInteger containing a random number.
@returns a random value in the range 0 to (maximumValue - 1).
diff --git a/Subtrees/beast/modules/beast_core/memory/beast_RecycledObjectPool.h b/Subtrees/beast/modules/beast_core/memory/beast_RecycledObjectPool.h
new file mode 100644
index 0000000000..6981427bf5
--- /dev/null
+++ b/Subtrees/beast/modules/beast_core/memory/beast_RecycledObjectPool.h
@@ -0,0 +1,126 @@
+//------------------------------------------------------------------------------
+/*
+ This file is part of Beast: https://github.com/vinniefalco/Beast
+ Copyright 2013, Vinnie Falco
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+//==============================================================================
+
+#ifndef BEAST_RECYCLEDOBJECTPOOL_H_INCLUDED
+#define BEAST_RECYCLEDOBJECTPOOL_H_INCLUDED
+
+/** A pool of objects which may be recycled.
+
+ This is a thread safe pool of objects that get re-used. It is
+ primarily designed to eliminate the need for many memory allocations
+ and frees when temporary buffers are needed for operations.
+
+ To use it, first declare a structure containing the information
+ that you want to recycle. Then when you want to use a recycled object
+ put a ScopedItem on your stack:
+
+ @code
+
+ struct StdString
+ {
+ std::string data;
+ };
+
+ RecycledObjectPool pool;
+
+ void foo ()
+ {
+ RecycledObjectPool ::ScopedItem item;
+
+ item.getObject ().data = "text";
+ }
+
+ @endcode
+*/
+template
+class RecycledObjectPool
+{
+public:
+ struct Item : Object, LockFreeStack - ::Node, LeakChecked
-
+ {
+ };
+
+ class ScopedItem
+ {
+ public:
+ explicit ScopedItem (RecycledObjectPool
& pool)
+ : m_pool (pool)
+ , m_item (pool.get ())
+ {
+ }
+
+ ~ScopedItem ()
+ {
+ m_pool.release (m_item);
+ }
+
+ Object& getObject () noexcept
+ {
+ return *m_item;
+ }
+
+ private:
+ RecycledObjectPool & m_pool;
+ Item* const m_item;
+ };
+
+public:
+ RecycledObjectPool () noexcept
+ {
+ }
+
+ ~RecycledObjectPool ()
+ {
+ for (;;)
+ {
+ Item* const item = m_stack.pop_front ();
+
+ if (item != nullptr)
+ delete item;
+ else
+ break;
+ }
+ }
+
+private:
+ Item* get ()
+ {
+ Item* item = m_stack.pop_front ();
+
+ if (item == nullptr)
+ {
+ item = new Item;
+
+ if (item == nullptr)
+ Throw (std::bad_alloc ());
+ }
+
+ return item;
+ }
+
+ void release (Item* item) noexcept
+ {
+ m_stack.push_front (item);
+ }
+
+private:
+ LockFreeStack - m_stack;
+};
+
+#endif
diff --git a/Subtrees/beast/modules/beast_core/memory/beast_Uncopyable.h b/Subtrees/beast/modules/beast_core/memory/beast_Uncopyable.h
index e1f1a614b1..349dde0a10 100644
--- a/Subtrees/beast/modules/beast_core/memory/beast_Uncopyable.h
+++ b/Subtrees/beast/modules/beast_core/memory/beast_Uncopyable.h
@@ -45,13 +45,16 @@
@code
- class MyClass : Uncopyable
+ class MyClass : public Uncopyable
{
public:
//...
};
@endcode
+
+ @note The derivation should be public or else child classes which
+ also derive from Uncopyable may not compile.
*/
class Uncopyable
{
diff --git a/Subtrees/beast/modules/beast_core/misc/beast_Result.cpp b/Subtrees/beast/modules/beast_core/misc/beast_Result.cpp
index a61394f08f..a3ad744474 100644
--- a/Subtrees/beast/modules/beast_core/misc/beast_Result.cpp
+++ b/Subtrees/beast/modules/beast_core/misc/beast_Result.cpp
@@ -21,6 +21,8 @@
*/
//==============================================================================
+Result::Result() noexcept {}
+
Result::Result (const String& message) noexcept
: errorMessage (message)
{
@@ -60,11 +62,6 @@ bool Result::operator!= (const Result& other) const noexcept
return errorMessage != other.errorMessage;
}
-Result Result::ok() noexcept
-{
- return Result (String::empty);
-}
-
Result Result::fail (const String& errorMessage) noexcept
{
return Result (errorMessage.isEmpty() ? "Unknown Error" : errorMessage);
diff --git a/Subtrees/beast/modules/beast_core/misc/beast_Result.h b/Subtrees/beast/modules/beast_core/misc/beast_Result.h
index 137daa830c..91bd1e1aee 100644
--- a/Subtrees/beast/modules/beast_core/misc/beast_Result.h
+++ b/Subtrees/beast/modules/beast_core/misc/beast_Result.h
@@ -26,10 +26,7 @@
#include "../text/beast_String.h"
-
-//==============================================================================
-/**
- Represents the 'success' or 'failure' of an operation, and holds an associated
+/** Represents the 'success' or 'failure' of an operation, and holds an associated
error message to describe the error when there's a failure.
E.g.
@@ -55,12 +52,12 @@
}
@endcode
*/
-class BEAST_API Result
+class BEAST_API Result
{
public:
//==============================================================================
/** Creates and returns a 'successful' result. */
- static Result ok() noexcept;
+ static Result ok() noexcept { return Result(); }
/** Creates a 'failure' result.
If you pass a blank error message in here, a default "Unknown Error" message
@@ -94,12 +91,12 @@ public:
const String& getErrorMessage() const noexcept;
//==============================================================================
- Result (const Result& other);
- Result& operator= (const Result& other);
+ Result (const Result&);
+ Result& operator= (const Result&);
#if BEAST_COMPILER_SUPPORTS_MOVE_SEMANTICS
- Result (Result&& other) noexcept;
- Result& operator= (Result&& other) noexcept;
+ Result (Result&&) noexcept;
+ Result& operator= (Result&&) noexcept;
#endif
bool operator== (const Result& other) const noexcept;
@@ -108,6 +105,9 @@ public:
private:
String errorMessage;
+ // The default constructor is not for public use!
+ // Instead, use Result::ok() or Result::fail()
+ Result() noexcept;
explicit Result (const String&) noexcept;
// These casts are private to prevent people trying to use the Result object in numeric contexts
@@ -115,5 +115,5 @@ private:
operator void*() const;
};
+#endif
-#endif // BEAST_RESULT_BEASTHEADER
diff --git a/Subtrees/beast/modules/beast_core/native/beast_posix_SharedCode.h b/Subtrees/beast/modules/beast_core/native/beast_posix_SharedCode.h
index 936be35f40..222490176e 100644
--- a/Subtrees/beast/modules/beast_core/native/beast_posix_SharedCode.h
+++ b/Subtrees/beast/modules/beast_core/native/beast_posix_SharedCode.h
@@ -504,6 +504,184 @@ Result FileOutputStream::truncate()
return getResultForReturnValue (ftruncate (getFD (fileHandle), (off_t) currentPosition));
}
+//==============================================================================
+
+Result RandomAccessFile::nativeOpen (File const& path, Mode mode)
+{
+ bassert (! isOpen ());
+
+ Result result (Result::ok ());
+
+ if (path.exists())
+ {
+ int oflag;
+ switch (mode)
+ {
+ case readOnly:
+ oflag = O_RDONLY;
+ break;
+
+ default:
+ case readWrite:
+ oflag = O_RDWR;
+ break;
+ };
+
+ const int f = ::open (path.getFullPathName().toUTF8(), oflag, 00644);
+
+ if (f != -1)
+ {
+ currentPosition = lseek (f, 0, SEEK_SET);
+
+ if (currentPosition >= 0)
+ {
+ file = path;
+ fileHandle = fdToVoidPointer (f);
+ }
+ else
+ {
+ result = getResultForErrno();
+ ::close (f);
+ }
+ }
+ else
+ {
+ result = getResultForErrno();
+ }
+ }
+ else if (mode == readWrite)
+ {
+ const int f = ::open (path.getFullPathName().toUTF8(), O_RDWR | O_CREAT, 00644);
+
+ if (f != -1)
+ {
+ file = path;
+ fileHandle = fdToVoidPointer (f);
+ }
+ else
+ {
+ result = getResultForErrno();
+ }
+ }
+ else
+ {
+ // file doesn't exist and we're opening read-only
+ result = Result::fail (String (strerror (ENOENT)));
+ }
+
+ return result;
+}
+
+void RandomAccessFile::nativeClose ()
+{
+ bassert (isOpen ());
+
+ file = File::nonexistent ();
+ ::close (getFD (fileHandle));
+ fileHandle = nullptr;
+ currentPosition = 0;
+}
+
+Result RandomAccessFile::nativeSetPosition (FileOffset newPosition)
+{
+ bassert (isOpen ());
+
+ off_t const actualPosition = lseek (getFD (fileHandle), newPosition, SEEK_SET);
+
+ currentPosition = actualPosition;
+
+ if (actualPosition != newPosition)
+ {
+ // VFALCO NOTE I dislike return from the middle but
+ // Result::ok() is showing up in the profile
+ //
+ return getResultForErrno();
+ }
+
+ return Result::ok();
+}
+
+Result RandomAccessFile::nativeRead (void* buffer, ByteCount numBytes, ByteCount* pActualAmount)
+{
+ bassert (isOpen ());
+
+ ssize_t bytesRead = ::read (getFD (fileHandle), buffer, numBytes);
+
+ if (bytesRead < 0)
+ {
+ if (pActualAmount != nullptr)
+ *pActualAmount = 0;
+
+ // VFALCO NOTE I dislike return from the middle but
+ // Result::ok() is showing up in the profile
+ //
+ return getResultForErrno();
+ }
+
+ currentPosition += bytesRead;
+
+ if (pActualAmount != nullptr)
+ *pActualAmount = bytesRead;
+
+ return Result::ok();
+}
+
+Result RandomAccessFile::nativeWrite (void const* data, ByteCount numBytes, size_t* pActualAmount)
+{
+ bassert (isOpen ());
+
+ ssize_t bytesWritten = ::write (getFD (fileHandle), data, numBytes);
+
+ // write(3) says that the actual return will be exactly -1 on
+ // error, but we will assume anything negative indicates failure.
+ //
+ if (bytesWritten < 0)
+ {
+ if (pActualAmount != nullptr)
+ *pActualAmount = 0;
+
+ // VFALCO NOTE I dislike return from the middle but
+ // Result::ok() is showing up in the profile
+ //
+ return getResultForErrno();
+ }
+
+ if (pActualAmount != nullptr)
+ *pActualAmount = bytesWritten;
+
+ return Result::ok();
+}
+
+Result RandomAccessFile::nativeTruncate ()
+{
+ bassert (isOpen ());
+
+ flush();
+
+ return getResultForReturnValue (ftruncate (getFD (fileHandle), (off_t) currentPosition));
+}
+
+Result RandomAccessFile::nativeFlush ()
+{
+ bassert (isOpen ());
+
+ Result result (Result::ok ());
+
+ if (fsync (getFD (fileHandle)) == -1)
+ result = getResultForErrno();
+
+#if BEAST_ANDROID
+ // This stuff tells the OS to asynchronously update the metadata
+ // that the OS has cached about the file - this metadata is used
+ // when the device is acting as a USB drive, and unless it's explicitly
+ // refreshed, it'll get out of step with the real file.
+ const LocalRef
t (javaString (file.getFullPathName()));
+ android.activity.callVoidMethod (BeastAppActivity.scanFile, t.get());
+#endif
+
+ return result;
+}
+
//==============================================================================
String SystemStats::getEnvironmentVariable (const String& name, const String& defaultValue)
{
diff --git a/Subtrees/beast/modules/beast_core/native/beast_win32_Files.cpp b/Subtrees/beast/modules/beast_core/native/beast_win32_Files.cpp
index 444bc51c3e..302ba9a960 100644
--- a/Subtrees/beast/modules/beast_core/native/beast_win32_Files.cpp
+++ b/Subtrees/beast/modules/beast_core/native/beast_win32_Files.cpp
@@ -307,6 +307,163 @@ Result FileOutputStream::truncate()
: WindowsFileHelpers::getResultForLastError();
}
+//==============================================================================
+
+Result RandomAccessFile::nativeOpen (File const& path, Mode mode)
+{
+ bassert (! isOpen ());
+
+ Result result (Result::ok ());
+
+ DWORD dwDesiredAccess;
+ switch (mode)
+ {
+ case readOnly:
+ dwDesiredAccess = GENERIC_READ;
+ break;
+
+ default:
+ case readWrite:
+ dwDesiredAccess = GENERIC_READ | GENERIC_WRITE;
+ break;
+ };
+
+ DWORD dwCreationDisposition;
+ switch (mode)
+ {
+ case readOnly:
+ dwCreationDisposition = OPEN_EXISTING;
+ break;
+
+ default:
+ case readWrite:
+ dwCreationDisposition = OPEN_ALWAYS;
+ break;
+ };
+
+ HANDLE h = CreateFile (path.getFullPathName().toWideCharPointer(),
+ dwDesiredAccess,
+ FILE_SHARE_READ,
+ 0,
+ dwCreationDisposition,
+ FILE_ATTRIBUTE_NORMAL,
+ 0);
+
+ if (h != INVALID_HANDLE_VALUE)
+ {
+ file = path;
+ fileHandle = h;
+
+ result = setPosition (0);
+
+ if (result.failed ())
+ nativeClose ();
+ }
+ else
+ {
+ result = WindowsFileHelpers::getResultForLastError();
+ }
+
+ return result;
+}
+
+void RandomAccessFile::nativeClose ()
+{
+ bassert (isOpen ());
+
+ CloseHandle ((HANDLE) fileHandle);
+
+ file = File::nonexistent ();
+ fileHandle = nullptr;
+ currentPosition = 0;
+}
+
+Result RandomAccessFile::nativeSetPosition (FileOffset newPosition)
+{
+ bassert (isOpen ());
+
+ Result result (Result::ok ());
+
+ LARGE_INTEGER li;
+ li.QuadPart = newPosition;
+ li.LowPart = SetFilePointer ((HANDLE) fileHandle,
+ (LONG) li.LowPart,
+ &li.HighPart,
+ FILE_BEGIN);
+
+ if (li.LowPart != INVALID_SET_FILE_POINTER)
+ {
+ currentPosition = li.QuadPart;
+ }
+ else
+ {
+ result = WindowsFileHelpers::getResultForLastError();
+ }
+
+ return result;
+}
+
+Result RandomAccessFile::nativeRead (void* buffer, ByteCount numBytes, ByteCount* pActualAmount)
+{
+ bassert (isOpen ());
+
+ Result result (Result::ok ());
+
+ DWORD actualNum = 0;
+
+ if (! ReadFile ((HANDLE) fileHandle, buffer, (DWORD) numBytes, &actualNum, 0))
+ result = WindowsFileHelpers::getResultForLastError();
+
+ currentPosition += actualNum;
+
+ if (pActualAmount != nullptr)
+ *pActualAmount = actualNum;
+
+ return result;
+}
+
+Result RandomAccessFile::nativeWrite (void const* data, ByteCount numBytes, size_t* pActualAmount)
+{
+ bassert (isOpen ());
+
+ Result result (Result::ok ());
+
+ DWORD actualNum = 0;
+
+ if (! WriteFile ((HANDLE) fileHandle, data, (DWORD) numBytes, &actualNum, 0))
+ result = WindowsFileHelpers::getResultForLastError();
+
+ if (pActualAmount != nullptr)
+ *pActualAmount = actualNum;
+
+ return result;
+}
+
+Result RandomAccessFile::nativeTruncate ()
+{
+ bassert (isOpen ());
+
+ Result result (Result::ok ());
+
+ if (! SetEndOfFile ((HANDLE) fileHandle))
+ result = WindowsFileHelpers::getResultForLastError();
+
+ return result;
+}
+
+Result RandomAccessFile::nativeFlush ()
+{
+ bassert (isOpen ());
+
+ Result result (Result::ok ());
+
+ if (! FlushFileBuffers ((HANDLE) fileHandle))
+ result = WindowsFileHelpers::getResultForLastError();
+
+ return result;
+}
+
+
//==============================================================================
void MemoryMappedFile::openInternal (const File& file, AccessMode mode)
{
diff --git a/Subtrees/beast/modules/beast_core/streams/beast_InputStream.cpp b/Subtrees/beast/modules/beast_core/streams/beast_InputStream.cpp
index ebefef3171..ac47e96991 100644
--- a/Subtrees/beast/modules/beast_core/streams/beast_InputStream.cpp
+++ b/Subtrees/beast/modules/beast_core/streams/beast_InputStream.cpp
@@ -65,6 +65,8 @@ short InputStream::readShortBigEndian()
int InputStream::readInt()
{
+ static_bassert (sizeof (int) == 4);
+
char temp[4];
if (read (temp, 4) == 4)
@@ -73,6 +75,16 @@ int InputStream::readInt()
return 0;
}
+int32 InputStream::readInt32()
+{
+ char temp[4];
+
+ if (read (temp, 4) == 4)
+ return (int32) ByteOrder::littleEndianInt (temp);
+
+ return 0;
+}
+
int InputStream::readIntBigEndian()
{
char temp[4];
@@ -83,6 +95,16 @@ int InputStream::readIntBigEndian()
return 0;
}
+int32 InputStream::readInt32BigEndian()
+{
+ char temp[4];
+
+ if (read (temp, 4) == 4)
+ return (int32) ByteOrder::bigEndianInt (temp);
+
+ return 0;
+}
+
int InputStream::readCompressedInt()
{
const uint8 sizeByte = (uint8) readByte();
@@ -229,3 +251,71 @@ void InputStream::skipNextBytes (int64 numBytesToSkip)
numBytesToSkip -= read (temp, (int) bmin (numBytesToSkip, (int64) skipBufferSize));
}
}
+
+//------------------------------------------------------------------------------
+
+// Unfortunately, putting these in the header causes duplicate
+// definition linker errors, even with the inline keyword!
+
+template <>
+char InputStream::readType () { return readByte (); }
+
+template <>
+short InputStream::readType () { return readShort (); }
+
+template <>
+int32 InputStream::readType () { return readInt32 (); }
+
+template <>
+int64 InputStream::readType () { return readInt64 (); }
+
+template <>
+unsigned char InputStream::readType () { return static_cast (readByte ()); }
+
+template <>
+unsigned short InputStream::readType () { return static_cast (readShort ()); }
+
+template <>
+uint32 InputStream::readType () { return static_cast (readInt32 ()); }
+
+template <>
+uint64 InputStream::readType () { return static_cast (readInt64 ()); }
+
+template <>
+float InputStream::readType () { return readFloat (); }
+
+template <>
+double InputStream::readType () { return readDouble (); }
+
+//------------------------------------------------------------------------------
+
+template <>
+char InputStream::readTypeBigEndian () { return readByte (); }
+
+template <>
+short InputStream::readTypeBigEndian () { return readShortBigEndian (); }
+
+template <>
+int32 InputStream::readTypeBigEndian () { return readInt32BigEndian (); }
+
+template <>
+int64 InputStream::readTypeBigEndian () { return readInt64BigEndian (); }
+
+template <>
+unsigned char InputStream::readTypeBigEndian () { return static_cast (readByte ()); }
+
+template <>
+unsigned short InputStream::readTypeBigEndian () { return static_cast (readShortBigEndian ()); }
+
+template <>
+uint32 InputStream::readTypeBigEndian () { return static_cast (readInt32BigEndian ()); }
+
+template <>
+uint64 InputStream::readTypeBigEndian () { return static_cast (readInt64BigEndian ()); }
+
+template <>
+float InputStream::readTypeBigEndian () { return readFloatBigEndian (); }
+
+template <>
+double InputStream::readTypeBigEndian () { return readDoubleBigEndian (); }
+
diff --git a/Subtrees/beast/modules/beast_core/streams/beast_InputStream.h b/Subtrees/beast/modules/beast_core/streams/beast_InputStream.h
index 7d7e643234..081a7b037b 100644
--- a/Subtrees/beast/modules/beast_core/streams/beast_InputStream.h
+++ b/Subtrees/beast/modules/beast_core/streams/beast_InputStream.h
@@ -92,7 +92,7 @@ public:
/** Reads a boolean from the stream.
- The bool is encoded as a single byte - 1 for true, 0 for false.
+ The bool is encoded as a single byte - non-zero for true, 0 for false.
If the stream is exhausted, this will return false.
@@ -111,10 +111,13 @@ public:
*/
virtual short readShort();
+ // VFALCO TODO Implement these functions
+ //virtual int16 readInt16 ();
+ //virtual uint16 readUInt16 ();
+
/** Reads two bytes from the stream as a little-endian 16-bit value.
- If the next two bytes read are byte1 and byte2, this returns
- (byte2 | (byte1 << 8)).
+ If the next two bytes read are byte1 and byte2, this returns (byte1 | (byte2 << 8)).
If the stream is exhausted partway through reading the bytes, this will return zero.
@@ -131,6 +134,13 @@ public:
@see OutputStream::writeInt, readIntBigEndian
*/
+ virtual int32 readInt32();
+
+ // VFALCO TODO Implement these functions
+ //virtual int16 readInt16BigEndian ();
+ //virtual uint16 readUInt16BigEndian ();
+
+ // DEPRECATED, assumes sizeof(int) == 4!
virtual int readInt();
/** Reads four bytes from the stream as a big-endian 32-bit value.
@@ -142,6 +152,9 @@ public:
@see OutputStream::writeIntBigEndian, readInt
*/
+ virtual int32 readInt32BigEndian();
+
+ // DEPRECATED, assumes sizeof(int) == 4!
virtual int readIntBigEndian();
/** Reads eight bytes from the stream as a little-endian 64-bit value.
@@ -216,6 +229,49 @@ public:
*/
virtual int readCompressedInt();
+ /** Reads a type using a template specialization.
+
+ This is useful when doing template meta-programming.
+ */
+ template
+ T readType ();
+
+ /** Reads a type using a template specialization.
+
+ The variable is passed as a parameter so that the template type
+ can be deduced.
+
+ This is useful when doing template meta-programming.
+ */
+ template
+ void readTypeInto (T* p)
+ {
+ *p = readType ();
+ }
+
+ /** Reads a type from a big endian stream using a template specialization.
+
+ The raw encoding of the type is read from the stream as a big-endian value
+ where applicable.
+
+ This is useful when doing template meta-programming.
+ */
+ template
+ T readTypeBigEndian ();
+
+ /** Reads a type using a template specialization.
+
+ The variable is passed as a parameter so that the template type
+ can be deduced.
+
+ This is useful when doing template meta-programming.
+ */
+ template
+ void readTypeBigEndianInto (T* p)
+ {
+ *p = readTypeBigEndian ();
+ }
+
//==============================================================================
/** Reads a UTF-8 string from the stream, up to the next linefeed or carriage return.
@@ -289,4 +345,4 @@ protected:
InputStream() noexcept {}
};
-#endif // BEAST_INPUTSTREAM_BEASTHEADER
+#endif
diff --git a/Subtrees/beast/modules/beast_core/streams/beast_MemoryInputStream.cpp b/Subtrees/beast/modules/beast_core/streams/beast_MemoryInputStream.cpp
index 59c6078562..2cdd4198a2 100644
--- a/Subtrees/beast/modules/beast_core/streams/beast_MemoryInputStream.cpp
+++ b/Subtrees/beast/modules/beast_core/streams/beast_MemoryInputStream.cpp
@@ -92,7 +92,7 @@ int64 MemoryInputStream::getPosition()
class MemoryStreamTests : public UnitTest
{
public:
- MemoryStreamTests() : UnitTest ("MemoryStream") { }
+ MemoryStreamTests() : UnitTest ("MemoryStream", "beast") { }
void runTest()
{
@@ -148,6 +148,4 @@ public:
}
};
-#if BEAST_UNIT_TESTS
static MemoryStreamTests memoryStreamTests;
-#endif
diff --git a/Subtrees/beast/modules/beast_core/streams/beast_MemoryOutputStream.cpp b/Subtrees/beast/modules/beast_core/streams/beast_MemoryOutputStream.cpp
index 0505920614..8df895da1b 100644
--- a/Subtrees/beast/modules/beast_core/streams/beast_MemoryOutputStream.cpp
+++ b/Subtrees/beast/modules/beast_core/streams/beast_MemoryOutputStream.cpp
@@ -22,23 +22,28 @@
//==============================================================================
MemoryOutputStream::MemoryOutputStream (const size_t initialSize)
- : data (internalBlock),
- position (0),
- size (0)
+ : blockToUse (&internalBlock), externalData (nullptr),
+ position (0), size (0), availableSize (0)
{
internalBlock.setSize (initialSize, false);
}
MemoryOutputStream::MemoryOutputStream (MemoryBlock& memoryBlockToWriteTo,
const bool appendToExistingBlockContent)
- : data (memoryBlockToWriteTo),
- position (0),
- size (0)
+ : blockToUse (&memoryBlockToWriteTo), externalData (nullptr),
+ position (0), size (0), availableSize (0)
{
if (appendToExistingBlockContent)
position = size = memoryBlockToWriteTo.getSize();
}
+MemoryOutputStream::MemoryOutputStream (void* destBuffer, size_t destBufferSize)
+ : blockToUse (nullptr), externalData (destBuffer),
+ position (0), size (0), availableSize (destBufferSize)
+{
+ bassert (externalData != nullptr); // This must be a valid pointer.
+}
+
MemoryOutputStream::~MemoryOutputStream()
{
trimExternalBlockSize();
@@ -51,13 +56,14 @@ void MemoryOutputStream::flush()
void MemoryOutputStream::trimExternalBlockSize()
{
- if (&data != &internalBlock)
- data.setSize (size, false);
+ if (blockToUse != &internalBlock && blockToUse != nullptr)
+ blockToUse->setSize (size, false);
}
void MemoryOutputStream::preallocate (const size_t bytesToPreallocate)
{
- data.ensureSize (bytesToPreallocate + 1);
+ if (blockToUse != nullptr)
+ blockToUse->ensureSize (bytesToPreallocate + 1);
}
void MemoryOutputStream::reset() noexcept
@@ -71,10 +77,24 @@ char* MemoryOutputStream::prepareToWrite (size_t numBytes)
bassert ((ssize_t) numBytes >= 0);
size_t storageNeeded = position + numBytes;
- if (storageNeeded >= data.getSize())
- data.ensureSize ((storageNeeded + bmin (storageNeeded / 2, (size_t) (1024 * 1024)) + 32) & ~31u);
+ char* data;
- char* const writePointer = static_cast (data.getData()) + position;
+ if (blockToUse != nullptr)
+ {
+ if (storageNeeded >= blockToUse->getSize())
+ blockToUse->ensureSize ((storageNeeded + bmin (storageNeeded / 2, (size_t) (1024 * 1024)) + 32) & ~31u);
+
+ data = static_cast (blockToUse->getData());
+ }
+ else
+ {
+ if (storageNeeded > availableSize)
+ return nullptr;
+
+ data = static_cast (externalData);
+ }
+
+ char* const writePointer = data + position;
position += numBytes;
size = bmax (size, position);
return writePointer;
@@ -82,23 +102,43 @@ char* MemoryOutputStream::prepareToWrite (size_t numBytes)
bool MemoryOutputStream::write (const void* const buffer, size_t howMany)
{
- bassert (buffer != nullptr && ((ssize_t) howMany) >= 0);
+ bassert (buffer != nullptr);
- if (howMany > 0)
- memcpy (prepareToWrite (howMany), buffer, howMany);
+ if (howMany == 0)
+ return true;
- return true;
+ if (char* dest = prepareToWrite (howMany))
+ {
+ memcpy (dest, buffer, howMany);
+ return true;
+ }
+
+ return false;
}
-void MemoryOutputStream::writeRepeatedByte (uint8 byte, size_t howMany)
+bool MemoryOutputStream::writeRepeatedByte (uint8 byte, size_t howMany)
{
- if (howMany > 0)
- memset (prepareToWrite (howMany), byte, howMany);
+ if (howMany == 0)
+ return true;
+
+ if (char* dest = prepareToWrite (howMany))
+ {
+ memset (dest, byte, howMany);
+ return true;
+ }
+
+ return false;
}
-void MemoryOutputStream::appendUTF8Char (beast_wchar c)
+bool MemoryOutputStream::appendUTF8Char (beast_wchar c)
{
- CharPointer_UTF8 (prepareToWrite (CharPointer_UTF8::getBytesRequiredFor (c))).write (c);
+ if (char* dest = prepareToWrite (CharPointer_UTF8::getBytesRequiredFor (c)))
+ {
+ CharPointer_UTF8 (dest).write (c);
+ return true;
+ }
+
+ return false;
}
MemoryBlock MemoryOutputStream::getMemoryBlock() const
@@ -108,10 +148,13 @@ MemoryBlock MemoryOutputStream::getMemoryBlock() const
const void* MemoryOutputStream::getData() const noexcept
{
- if (data.getSize() > size)
- static_cast (data.getData()) [size] = 0;
+ if (blockToUse == nullptr)
+ return externalData;
- return data.getData();
+ if (blockToUse->getSize() > size)
+ static_cast (blockToUse->getData()) [size] = 0;
+
+ return blockToUse->getData();
}
bool MemoryOutputStream::setPosition (int64 newPosition)
@@ -137,7 +180,8 @@ int MemoryOutputStream::writeFromInputStream (InputStream& source, int64 maxNumB
if (maxNumBytesToWrite > availableData)
maxNumBytesToWrite = availableData;
- preallocate (data.getSize() + (size_t) maxNumBytesToWrite);
+ if (blockToUse != nullptr)
+ preallocate (blockToUse->getSize() + (size_t) maxNumBytesToWrite);
}
return OutputStream::writeFromInputStream (source, maxNumBytesToWrite);
@@ -162,4 +206,4 @@ OutputStream& BEAST_CALLTYPE operator<< (OutputStream& stream, const MemoryOutpu
stream.write (streamToRead.getData(), dataSize);
return stream;
-}
+}
\ No newline at end of file
diff --git a/Subtrees/beast/modules/beast_core/streams/beast_MemoryOutputStream.h b/Subtrees/beast/modules/beast_core/streams/beast_MemoryOutputStream.h
index 1413dcbf3d..be5fd04f28 100644
--- a/Subtrees/beast/modules/beast_core/streams/beast_MemoryOutputStream.h
+++ b/Subtrees/beast/modules/beast_core/streams/beast_MemoryOutputStream.h
@@ -28,7 +28,6 @@
#include "../memory/beast_MemoryBlock.h"
#include "../memory/beast_ScopedPointer.h"
-
//==============================================================================
/**
Writes data to an internal memory buffer, which grows as required.
@@ -36,14 +35,20 @@
The data that was written into the stream can then be accessed later as
a contiguous block of memory.
*/
-class BEAST_API MemoryOutputStream
+//==============================================================================
+/**
+ Writes data to an internal memory buffer, which grows as required.
+
+ The data that was written into the stream can then be accessed later as
+ a contiguous block of memory.
+*/
+class BEAST_API MemoryOutputStream
: public OutputStream
, LeakChecked
{
public:
//==============================================================================
/** Creates an empty memory stream, ready to be written into.
-
@param initialSize the intial amount of capacity to allocate for writing into
*/
MemoryOutputStream (size_t initialSize = 256);
@@ -63,6 +68,14 @@ public:
MemoryOutputStream (MemoryBlock& memoryBlockToWriteTo,
bool appendToExistingBlockContent);
+ /** Creates a MemoryOutputStream that will write into a user-supplied, fixed-size
+ block of memory.
+
+ When using this mode, the stream will write directly into this memory area until
+ it's full, at which point write operations will fail.
+ */
+ MemoryOutputStream (void* destBuffer, size_t destBufferSize);
+
/** Destructor.
This will free any data that was written to it.
*/
@@ -88,7 +101,7 @@ public:
void preallocate (size_t bytesToPreallocate);
/** Appends the utf-8 bytes for a unicode character */
- void appendUTF8Char (beast_wchar character);
+ bool appendUTF8Char (beast_wchar character);
/** Returns a String created from the (UTF8) data that has been written to the stream. */
String toUTF8() const;
@@ -108,24 +121,24 @@ public:
*/
void flush();
- bool write (const void* buffer, size_t howMany);
- int64 getPosition() { return position; }
- bool setPosition (int64 newPosition);
- int writeFromInputStream (InputStream& source, int64 maxNumBytesToWrite);
- void writeRepeatedByte (uint8 byte, size_t numTimesToRepeat);
+ bool write (const void*, size_t) override;
+ int64 getPosition() override { return position; }
+ bool setPosition (int64) override;
+ int writeFromInputStream (InputStream&, int64 maxNumBytesToWrite) override;
+ bool writeRepeatedByte (uint8 byte, size_t numTimesToRepeat) override;
private:
- //==============================================================================
- MemoryBlock& data;
- MemoryBlock internalBlock;
- size_t position, size;
-
void trimExternalBlockSize();
char* prepareToWrite (size_t);
+
+ //==============================================================================
+ MemoryBlock* const blockToUse;
+ MemoryBlock internalBlock;
+ void* externalData;
+ size_t position, size, availableSize;
};
/** Copies all the data that has been written to a MemoryOutputStream into another stream. */
OutputStream& BEAST_CALLTYPE operator<< (OutputStream& stream, const MemoryOutputStream& streamToRead);
-
-#endif // BEAST_MEMORYOUTPUTSTREAM_BEASTHEADER
+#endif
\ No newline at end of file
diff --git a/Subtrees/beast/modules/beast_core/streams/beast_OutputStream.cpp b/Subtrees/beast/modules/beast_core/streams/beast_OutputStream.cpp
index 5089153779..614b32e1db 100644
--- a/Subtrees/beast/modules/beast_core/streams/beast_OutputStream.cpp
+++ b/Subtrees/beast/modules/beast_core/streams/beast_OutputStream.cpp
@@ -61,48 +61,69 @@ OutputStream::~OutputStream()
}
//==============================================================================
-void OutputStream::writeBool (const bool b)
+bool OutputStream::writeBool (const bool b)
{
- writeByte (b ? (char) 1
- : (char) 0);
+ return writeByte (b ? (char) 1
+ : (char) 0);
}
-void OutputStream::writeByte (char byte)
+bool OutputStream::writeByte (char byte)
{
- write (&byte, 1);
+ return write (&byte, 1);
}
-void OutputStream::writeRepeatedByte (uint8 byte, size_t numTimesToRepeat)
+bool OutputStream::writeRepeatedByte (uint8 byte, size_t numTimesToRepeat)
{
for (size_t i = 0; i < numTimesToRepeat; ++i)
- writeByte ((char) byte);
+ if (! writeByte ((char) byte))
+ return false;
+
+ return true;
}
-void OutputStream::writeShort (short value)
+bool OutputStream::writeShort (short value)
{
const unsigned short v = ByteOrder::swapIfBigEndian ((unsigned short) value);
- write (&v, 2);
+ return write (&v, 2);
}
-void OutputStream::writeShortBigEndian (short value)
+bool OutputStream::writeShortBigEndian (short value)
{
const unsigned short v = ByteOrder::swapIfLittleEndian ((unsigned short) value);
- write (&v, 2);
+ return write (&v, 2);
}
-void OutputStream::writeInt (int value)
+bool OutputStream::writeInt32 (int32 value)
{
+ static_bassert (sizeof (int32) == 4);
+
+ const unsigned int v = ByteOrder::swapIfBigEndian ((uint32) value);
+ return write (&v, 4);
+}
+
+bool OutputStream::writeInt (int value)
+{
+ static_bassert (sizeof (int) == 4);
+
const unsigned int v = ByteOrder::swapIfBigEndian ((unsigned int) value);
- write (&v, 4);
+ return write (&v, 4);
}
-void OutputStream::writeIntBigEndian (int value)
+bool OutputStream::writeInt32BigEndian (int value)
{
- const unsigned int v = ByteOrder::swapIfLittleEndian ((unsigned int) value);
- write (&v, 4);
+ static_bassert (sizeof (int32) == 4);
+ const uint32 v = ByteOrder::swapIfLittleEndian ((uint32) value);
+ return write (&v, 4);
}
-void OutputStream::writeCompressedInt (int value)
+bool OutputStream::writeIntBigEndian (int value)
+{
+ static_bassert (sizeof (int) == 4);
+ const unsigned int v = ByteOrder::swapIfLittleEndian ((unsigned int) value);
+ return write (&v, 4);
+}
+
+bool OutputStream::writeCompressedInt (int value)
{
unsigned int un = (value < 0) ? (unsigned int) -value
: (unsigned int) value;
@@ -121,60 +142,60 @@ void OutputStream::writeCompressedInt (int value)
if (value < 0)
data[0] |= 0x80;
- write (data, num + 1);
+ return write (data, num + 1);
}
-void OutputStream::writeInt64 (int64 value)
+bool OutputStream::writeInt64 (int64 value)
{
const uint64 v = ByteOrder::swapIfBigEndian ((uint64) value);
- write (&v, 8);
+ return write (&v, 8);
}
-void OutputStream::writeInt64BigEndian (int64 value)
+bool OutputStream::writeInt64BigEndian (int64 value)
{
const uint64 v = ByteOrder::swapIfLittleEndian ((uint64) value);
- write (&v, 8);
+ return write (&v, 8);
}
-void OutputStream::writeFloat (float value)
+bool OutputStream::writeFloat (float value)
{
union { int asInt; float asFloat; } n;
n.asFloat = value;
- writeInt (n.asInt);
+ return writeInt (n.asInt);
}
-void OutputStream::writeFloatBigEndian (float value)
+bool OutputStream::writeFloatBigEndian (float value)
{
union { int asInt; float asFloat; } n;
n.asFloat = value;
- writeIntBigEndian (n.asInt);
+ return writeIntBigEndian (n.asInt);
}
-void OutputStream::writeDouble (double value)
+bool OutputStream::writeDouble (double value)
{
union { int64 asInt; double asDouble; } n;
n.asDouble = value;
- writeInt64 (n.asInt);
+ return writeInt64 (n.asInt);
}
-void OutputStream::writeDoubleBigEndian (double value)
+bool OutputStream::writeDoubleBigEndian (double value)
{
union { int64 asInt; double asDouble; } n;
n.asDouble = value;
- writeInt64BigEndian (n.asInt);
+ return writeInt64BigEndian (n.asInt);
}
-void OutputStream::writeString (const String& text)
+bool OutputStream::writeString (const String& text)
{
// (This avoids using toUTF8() to prevent the memory bloat that it would leave behind
// if lots of large, persistent strings were to be written to streams).
const size_t numBytes = text.getNumBytesAsUTF8() + 1;
HeapBlock temp (numBytes);
text.copyToUTF8 (temp, numBytes);
- write (temp, numBytes);
+ return write (temp, numBytes);
}
-void OutputStream::writeText (const String& text, const bool asUTF16,
+bool OutputStream::writeText (const String& text, const bool asUTF16,
const bool writeUTF16ByteOrderMark)
{
if (asUTF16)
@@ -196,7 +217,9 @@ void OutputStream::writeText (const String& text, const bool asUTF16,
writeShort ((short) '\r');
lastCharWasReturn = (c == L'\r');
- writeShort ((short) c);
+
+ if (! writeShort ((short) c))
+ return false;
}
}
else
@@ -209,9 +232,12 @@ void OutputStream::writeText (const String& text, const bool asUTF16,
if (*t == '\n')
{
if (t > src)
- write (src, (int) (t - src));
+ if (! write (src, (int) (t - src)))
+ return false;
+
+ if (! write ("\r\n", 2))
+ return false;
- write ("\r\n", 2);
src = t + 1;
}
else if (*t == '\r')
@@ -222,7 +248,8 @@ void OutputStream::writeText (const String& text, const bool asUTF16,
else if (*t == 0)
{
if (t > src)
- write (src, (int) (t - src));
+ if (! write (src, (int) (t - src)))
+ return false;
break;
}
@@ -230,6 +257,8 @@ void OutputStream::writeText (const String& text, const bool asUTF16,
++t;
}
}
+
+ return true;
}
int OutputStream::writeFromInputStream (InputStream& source, int64 numBytesToWrite)
@@ -318,3 +347,70 @@ BEAST_API OutputStream& BEAST_CALLTYPE operator<< (OutputStream& stream, const N
{
return stream << stream.getNewLineString();
}
+
+//------------------------------------------------------------------------------
+
+// Unfortunately, putting these in the header causes duplicate
+// definition linker errors, even with the inline keyword!
+
+template <>
+BEAST_API bool OutputStream::writeType (char v) { return writeByte (v); }
+
+template <>
+BEAST_API bool OutputStream::writeType (short v) { return writeShort (v); }
+
+template <>
+BEAST_API bool OutputStream::writeType (int32 v) { return writeInt32 (v); }
+
+template <>
+BEAST_API bool OutputStream::writeType (int64 v) { return writeInt64 (v); }
+
+template <>
+BEAST_API bool OutputStream::writeType (unsigned char v) { return writeByte (static_cast (v)); }
+
+template <>
+BEAST_API bool OutputStream::writeType (unsigned short v) { return writeShort (static_cast (v)); }
+
+template <>
+BEAST_API bool OutputStream::writeType (uint32 v) { return writeInt32 (static_cast (v)); }
+
+template <>
+BEAST_API bool OutputStream::writeType (uint64 v) { return writeInt64 (static_cast (v)); }
+
+template <>
+BEAST_API bool OutputStream::writeType (float v) { return writeFloat (v); }
+
+template <>
+BEAST_API bool OutputStream::writeType (double v) { return writeDouble (v); }
+
+//------------------------------------------------------------------------------
+
+template <>
+BEAST_API bool OutputStream::writeTypeBigEndian (char v) { return writeByte (v); }
+
+template <>
+BEAST_API bool OutputStream::writeTypeBigEndian (short v) { return writeShortBigEndian (v); }
+
+template <>
+BEAST_API bool OutputStream::writeTypeBigEndian (int32 v) { return writeInt32BigEndian (v); }
+
+template <>
+BEAST_API bool OutputStream::writeTypeBigEndian (int64 v) { return writeInt64BigEndian (v); }
+
+template <>
+BEAST_API bool OutputStream::writeTypeBigEndian (unsigned char v) { return writeByte (static_cast (v)); }
+
+template <>
+BEAST_API bool OutputStream::writeTypeBigEndian (unsigned short v) { return writeShortBigEndian (static_cast (v)); }
+
+template <>
+BEAST_API bool OutputStream::writeTypeBigEndian (uint32 v) { return writeInt32BigEndian (static_cast (v)); }
+
+template <>
+BEAST_API bool OutputStream::writeTypeBigEndian (uint64 v) { return writeInt64BigEndian (static_cast (v)); }
+
+template <>
+BEAST_API bool OutputStream::writeTypeBigEndian (float v) { return writeFloatBigEndian (v); }
+
+template <>
+BEAST_API bool OutputStream::writeTypeBigEndian (double v) { return writeDoubleBigEndian (v); }
diff --git a/Subtrees/beast/modules/beast_core/streams/beast_OutputStream.h b/Subtrees/beast/modules/beast_core/streams/beast_OutputStream.h
index 9365041ba8..b536c48a57 100644
--- a/Subtrees/beast/modules/beast_core/streams/beast_OutputStream.h
+++ b/Subtrees/beast/modules/beast_core/streams/beast_OutputStream.h
@@ -40,9 +40,7 @@ class File;
@see InputStream, MemoryOutputStream, FileOutputStream
*/
-class BEAST_API OutputStream
- : public Uncopyable
- , LeakChecked
+class BEAST_API OutputStream : public Uncopyable
{
protected:
//==============================================================================
@@ -92,75 +90,111 @@ public:
//==============================================================================
/** Writes a single byte to the stream.
-
+ @returns false if the write operation fails for some reason
@see InputStream::readByte
*/
- virtual void writeByte (char byte);
+ virtual bool writeByte (char byte);
/** Writes a boolean to the stream as a single byte.
This is encoded as a binary byte (not as text) with a value of 1 or 0.
+ @returns false if the write operation fails for some reason
@see InputStream::readBool
*/
- virtual void writeBool (bool boolValue);
+ virtual bool writeBool (bool boolValue);
/** Writes a 16-bit integer to the stream in a little-endian byte order.
This will write two bytes to the stream: (value & 0xff), then (value >> 8).
+ @returns false if the write operation fails for some reason
@see InputStream::readShort
*/
- virtual void writeShort (short value);
+ virtual bool writeShort (short value);
/** Writes a 16-bit integer to the stream in a big-endian byte order.
This will write two bytes to the stream: (value >> 8), then (value & 0xff).
+ @returns false if the write operation fails for some reason
@see InputStream::readShortBigEndian
*/
- virtual void writeShortBigEndian (short value);
+ virtual bool writeShortBigEndian (short value);
/** Writes a 32-bit integer to the stream in a little-endian byte order.
+ @returns false if the write operation fails for some reason
@see InputStream::readInt
*/
- virtual void writeInt (int value);
+ virtual bool writeInt32 (int32 value);
+
+ // DEPRECATED, assumes sizeof (int) == 4!
+ virtual bool writeInt (int value);
/** Writes a 32-bit integer to the stream in a big-endian byte order.
+ @returns false if the write operation fails for some reason
@see InputStream::readIntBigEndian
*/
- virtual void writeIntBigEndian (int value);
+ virtual bool writeInt32BigEndian (int32 value);
+
+ // DEPRECATED, assumes sizeof (int) == 4!
+ virtual bool writeIntBigEndian (int value);
/** Writes a 64-bit integer to the stream in a little-endian byte order.
+ @returns false if the write operation fails for some reason
@see InputStream::readInt64
*/
- virtual void writeInt64 (int64 value);
+ virtual bool writeInt64 (int64 value);
/** Writes a 64-bit integer to the stream in a big-endian byte order.
+ @returns false if the write operation fails for some reason
@see InputStream::readInt64BigEndian
*/
- virtual void writeInt64BigEndian (int64 value);
+ virtual bool writeInt64BigEndian (int64 value);
/** Writes a 32-bit floating point value to the stream in a binary format.
The binary 32-bit encoding of the float is written as a little-endian int.
+ @returns false if the write operation fails for some reason
@see InputStream::readFloat
*/
- virtual void writeFloat (float value);
+ virtual bool writeFloat (float value);
/** Writes a 32-bit floating point value to the stream in a binary format.
The binary 32-bit encoding of the float is written as a big-endian int.
+ @returns false if the write operation fails for some reason
@see InputStream::readFloatBigEndian
*/
- virtual void writeFloatBigEndian (float value);
+ virtual bool writeFloatBigEndian (float value);
/** Writes a 64-bit floating point value to the stream in a binary format.
The eight raw bytes of the double value are written out as a little-endian 64-bit int.
+ @returns false if the write operation fails for some reason
@see InputStream::readDouble
*/
- virtual void writeDouble (double value);
+ virtual bool writeDouble (double value);
/** Writes a 64-bit floating point value to the stream in a binary format.
The eight raw bytes of the double value are written out as a big-endian 64-bit int.
@see InputStream::readDoubleBigEndian
+ @returns false if the write operation fails for some reason
*/
- virtual void writeDoubleBigEndian (double value);
+ virtual bool writeDoubleBigEndian (double value);
- /** Writes a byte to the output stream a given number of times. */
- virtual void writeRepeatedByte (uint8 byte, size_t numTimesToRepeat);
+ /** Write a type using a template specialization.
+
+ This is useful when doing template meta-programming.
+ */
+ template
+ bool writeType (T value);
+
+ /** Write a type using a template specialization.
+
+ The raw encoding of the type is written to the stream as a big-endian value
+ where applicable.
+
+ This is useful when doing template meta-programming.
+ */
+ template
+ bool writeTypeBigEndian (T value);
+
+ /** Writes a byte to the output stream a given number of times.
+ @returns false if the write operation fails for some reason
+ */
+ virtual bool writeRepeatedByte (uint8 byte, size_t numTimesToRepeat);
/** Writes a condensed binary encoding of a 32-bit integer.
@@ -170,9 +204,10 @@ public:
The format used is: number of significant bytes + up to 4 bytes in little-endian order.
+ @returns false if the write operation fails for some reason
@see InputStream::readCompressedInt
*/
- virtual void writeCompressedInt (int value);
+ virtual bool writeCompressedInt (int value);
/** Stores a string in the stream in a binary format.
@@ -184,9 +219,10 @@ public:
For appending text to a file, instead use writeText, or operator<<
+ @returns false if the write operation fails for some reason
@see InputStream::readString, writeText, operator<<
*/
- virtual void writeString (const String& text);
+ virtual bool writeString (const String& text);
/** Writes a string of text to the stream.
@@ -195,8 +231,9 @@ public:
of a file).
The method also replaces '\\n' characters in the text with '\\r\\n'.
+ @returns false if the write operation fails for some reason
*/
- virtual void writeText (const String& text,
+ virtual bool writeText (const String& text,
bool asUTF16,
bool writeUTF16ByteOrderMark);
@@ -206,6 +243,7 @@ public:
@param maxNumBytesToWrite the number of bytes to read from the stream (if this is
less than zero, it will keep reading until the input
is exhausted)
+ @returns the number of bytes written
*/
virtual int writeFromInputStream (InputStream& source, int64 maxNumBytesToWrite);
@@ -258,5 +296,4 @@ BEAST_API OutputStream& BEAST_CALLTYPE operator<< (OutputStream& stream, InputSt
*/
BEAST_API OutputStream& BEAST_CALLTYPE operator<< (OutputStream& stream, const NewLine&);
-
-#endif // BEAST_OUTPUTSTREAM_BEASTHEADER
+#endif
diff --git a/Subtrees/beast/modules/beast_core/text/beast_String.cpp b/Subtrees/beast/modules/beast_core/text/beast_String.cpp
index d8771bb31f..55ad6d5929 100644
--- a/Subtrees/beast/modules/beast_core/text/beast_String.cpp
+++ b/Subtrees/beast/modules/beast_core/text/beast_String.cpp
@@ -2078,7 +2078,7 @@ String String::fromUTF8 (const char* const buffer, int bufferSizeBytes)
class StringTests : public UnitTest
{
public:
- StringTests() : UnitTest ("String") { }
+ StringTests() : UnitTest ("String", "beast") { }
template
struct TestUTFConversion
@@ -2402,6 +2402,4 @@ public:
}
};
-#if BEAST_UNIT_TESTS
static StringTests stringTests;
-#endif
diff --git a/Subtrees/beast/modules/beast_core/text/beast_TextDiff.cpp b/Subtrees/beast/modules/beast_core/text/beast_TextDiff.cpp
index 7a1f2a894c..6da5b587bd 100644
--- a/Subtrees/beast/modules/beast_core/text/beast_TextDiff.cpp
+++ b/Subtrees/beast/modules/beast_core/text/beast_TextDiff.cpp
@@ -177,7 +177,7 @@ String TextDiff::Change::appliedTo (const String& text) const noexcept
class DiffTests : public UnitTest
{
public:
- DiffTests() : UnitTest ("TextDiff") {}
+ DiffTests() : UnitTest ("TextDiff", "beast") {}
static String createString()
{
@@ -229,6 +229,4 @@ public:
}
};
-#if BEAST_UNIT_TESTS
static DiffTests diffTests;
-#endif
diff --git a/Subtrees/beast/modules/beast_core/threads/beast_ChildProcess.cpp b/Subtrees/beast/modules/beast_core/threads/beast_ChildProcess.cpp
index 4e809a7197..0c08aa82d4 100644
--- a/Subtrees/beast/modules/beast_core/threads/beast_ChildProcess.cpp
+++ b/Subtrees/beast/modules/beast_core/threads/beast_ChildProcess.cpp
@@ -61,7 +61,7 @@ String ChildProcess::readAllProcessOutput()
class ChildProcessTests : public UnitTest
{
public:
- ChildProcessTests() : UnitTest ("ChildProcess") {}
+ ChildProcessTests() : UnitTest ("ChildProcess", "beast") {}
void runTest()
{
@@ -82,6 +82,4 @@ public:
}
};
-#if BEAST_UNIT_TESTS
static ChildProcessTests childProcessTests;
-#endif
diff --git a/Subtrees/beast/modules/beast_core/threads/beast_Thread.cpp b/Subtrees/beast/modules/beast_core/threads/beast_Thread.cpp
index 999f35d444..7685cfa00a 100644
--- a/Subtrees/beast/modules/beast_core/threads/beast_Thread.cpp
+++ b/Subtrees/beast/modules/beast_core/threads/beast_Thread.cpp
@@ -255,7 +255,7 @@ void SpinLock::enter() const noexcept
class AtomicTests : public UnitTest
{
public:
- AtomicTests() : UnitTest ("Atomic") {}
+ AtomicTests() : UnitTest ("Atomic", "beast") {}
void runTest()
{
@@ -350,6 +350,4 @@ public:
};
};
-#if BEAST_UNIT_TESTS
static AtomicTests atomicTests;
-#endif
diff --git a/Subtrees/beast/modules/beast_core/zip/beast_GZIPCompressorOutputStream.cpp b/Subtrees/beast/modules/beast_core/zip/beast_GZIPCompressorOutputStream.cpp
index d8febfa5ad..7d66da47a0 100644
--- a/Subtrees/beast/modules/beast_core/zip/beast_GZIPCompressorOutputStream.cpp
+++ b/Subtrees/beast/modules/beast_core/zip/beast_GZIPCompressorOutputStream.cpp
@@ -161,7 +161,7 @@ bool GZIPCompressorOutputStream::setPosition (int64 /*newPosition*/)
class GZIPTests : public UnitTest
{
public:
- GZIPTests() : UnitTest ("GZIP") {}
+ GZIPTests() : UnitTest ("GZIP", "beast") {}
void runTest()
{
@@ -205,6 +205,4 @@ public:
}
};
-#if BEAST_UNIT_TESTS
static GZIPTests gzipTests;
-#endif
diff --git a/Subtrees/beast/modules/beast_core/zip/beast_GZIPCompressorOutputStream.h b/Subtrees/beast/modules/beast_core/zip/beast_GZIPCompressorOutputStream.h
index c083afc45f..d13e72802a 100644
--- a/Subtrees/beast/modules/beast_core/zip/beast_GZIPCompressorOutputStream.h
+++ b/Subtrees/beast/modules/beast_core/zip/beast_GZIPCompressorOutputStream.h
@@ -80,9 +80,9 @@ public:
*/
void flush();
- int64 getPosition();
- bool setPosition (int64 newPosition);
- bool write (const void* destBuffer, size_t howMany);
+ int64 getPosition() override;
+ bool setPosition (int64) override;
+ bool write (const void*, size_t) override;
/** These are preset values that can be used for the constructor's windowBits paramter.
For more info about this, see the zlib documentation for its windowBits parameter.
diff --git a/Subtrees/beast/modules/beast_crypto/math/beast_UnsignedInteger.cpp b/Subtrees/beast/modules/beast_crypto/math/beast_UnsignedInteger.cpp
index 916e6bad0d..b9a2b5d18f 100644
--- a/Subtrees/beast/modules/beast_crypto/math/beast_UnsignedInteger.cpp
+++ b/Subtrees/beast/modules/beast_crypto/math/beast_UnsignedInteger.cpp
@@ -20,7 +20,7 @@
class UnsignedIntegerTests : public UnitTest
{
public:
- UnsignedIntegerTests () : UnitTest ("UnsignedInteger")
+ UnsignedIntegerTests () : UnitTest ("UnsignedInteger", "beast")
{
}
@@ -29,7 +29,7 @@ public:
{
String s;
- s << "UnsignedInteger <" << String(Bytes) << ">";
+ s << "Bytes=" << String(Bytes);
beginTest (s);
@@ -82,6 +82,4 @@ public:
private:
};
-#if BEAST_UNIT_TESTS
static UnsignedIntegerTests unsignedIntegerTests;
-#endif
diff --git a/Subtrees/beast/modules/beast_crypto/math/beast_UnsignedInteger.h b/Subtrees/beast/modules/beast_crypto/math/beast_UnsignedInteger.h
index 9d4f950ea3..fc0fa167cb 100644
--- a/Subtrees/beast/modules/beast_crypto/math/beast_UnsignedInteger.h
+++ b/Subtrees/beast/modules/beast_crypto/math/beast_UnsignedInteger.h
@@ -28,7 +28,7 @@
@tparam Bytes The number of bytes of storage.
*/
-template
+template
class UnsignedInteger : public SafeBool >
{
public:
@@ -76,10 +76,10 @@ public:
template
UnsignedInteger & operator= (IntegerType value)
{
- static_bassert (sizeof (Bytes) >= sizeof (IntegerType));
+ static_bassert (Bytes >= sizeof (IntegerType));
clear ();
value = ByteOrder::swapIfLittleEndian (value);
- memcpy (end () - sizeof (value), &value, sizeof (value));
+ memcpy (end () - sizeof (value), &value, bmin (Bytes, sizeof (value)));
return *this;
}
@@ -234,28 +234,28 @@ public:
*/
bool operator< (UnsignedInteger const& other) const noexcept
{
- return compare (other) == -1;
+ return compare (other) < 0;
}
/** Ordered comparison.
*/
bool operator<= (UnsignedInteger const& other) const noexcept
{
- return compare (other) != 1;
+ return compare (other) <= 0;
}
/** Ordered comparison.
*/
bool operator> (UnsignedInteger const& other) const noexcept
{
- return compare (other) == 1;
+ return compare (other) > 0;
}
/** Ordered comparison.
*/
bool operator>= (UnsignedInteger const& other) const noexcept
{
- return compare (other) != -1;
+ return compare (other) >= 0;
}
/** Perform bitwise logical-not.
diff --git a/Subtrees/beast/modules/beast_db/beast_db.cpp b/Subtrees/beast/modules/beast_db/beast_db.cpp
new file mode 100644
index 0000000000..622c2afdbd
--- /dev/null
+++ b/Subtrees/beast/modules/beast_db/beast_db.cpp
@@ -0,0 +1,31 @@
+//------------------------------------------------------------------------------
+/*
+ This file is part of Beast: https://github.com/vinniefalco/Beast
+ Copyright 2013, Vinnie Falco
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+//==============================================================================
+
+#include "BeastConfig.h"
+
+#include "beast_db.h"
+
+#include "../beast_crypto/beast_crypto.h"
+
+namespace beast
+{
+
+#include "keyvalue/beast_KeyvaDB.cpp"
+
+}
diff --git a/Subtrees/beast/modules/beast_db/beast_db.h b/Subtrees/beast/modules/beast_db/beast_db.h
new file mode 100644
index 0000000000..1612a178d9
--- /dev/null
+++ b/Subtrees/beast/modules/beast_db/beast_db.h
@@ -0,0 +1,52 @@
+//------------------------------------------------------------------------------
+/*
+ This file is part of Beast: https://github.com/vinniefalco/Beast
+ Copyright 2013, Vinnie Falco
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+//==============================================================================
+
+#ifndef BEAST_BEAST_DB_H_INCLUDED
+#define BEAST_BEAST_DB_H_INCLUDED
+
+//------------------------------------------------------------------------------
+
+/* If you fail to make sure that all your compile units are building Beast with
+ the same set of option flags, then there's a risk that different compile
+ units will treat the classes as having different memory layouts, leading to
+ very nasty memory corruption errors when they all get linked together.
+ That's why it's best to always include the BeastConfig.h file before any
+ beast headers.
+*/
+#ifndef BEAST_BEASTCONFIG_H_INCLUDED
+# ifdef _MSC_VER
+# pragma message ("Have you included your BeastConfig.h file before including the Beast headers?")
+# else
+# warning "Have you included your BeastConfig.h file before including the Beast headers?"
+# endif
+#endif
+
+#include "../beast_core/beast_core.h"
+#include "../beast_basics/beast_basics.h"
+
+//------------------------------------------------------------------------------
+
+namespace beast
+{
+
+#include "keyvalue/beast_KeyvaDB.h"
+
+}
+
+#endif
diff --git a/Subtrees/beast/modules/beast_db/beast_db.mm b/Subtrees/beast/modules/beast_db/beast_db.mm
new file mode 100644
index 0000000000..2ae0b83c82
--- /dev/null
+++ b/Subtrees/beast/modules/beast_db/beast_db.mm
@@ -0,0 +1,20 @@
+//------------------------------------------------------------------------------
+/*
+ This file is part of Beast: https://github.com/vinniefalco/Beast
+ Copyright 2013, Vinnie Falco
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+//==============================================================================
+
+#include "beast_db.cpp"
diff --git a/Subtrees/beast/modules/beast_db/keyvalue/beast_KeyvaDB.cpp b/Subtrees/beast/modules/beast_db/keyvalue/beast_KeyvaDB.cpp
new file mode 100644
index 0000000000..7867292d74
--- /dev/null
+++ b/Subtrees/beast/modules/beast_db/keyvalue/beast_KeyvaDB.cpp
@@ -0,0 +1,861 @@
+//------------------------------------------------------------------------------
+/*
+ This file is part of Beast: https://github.com/vinniefalco/Beast
+ Copyright 2013, Vinnie Falco
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+//==============================================================================
+
+/*
+
+TODO
+
+- Check consistency / range checking on read
+
+- Cache top level tree nodes
+
+- Coalesce I/O in RandomAccessFile
+
+- Delete / file compaction
+
+*/
+
+class KeyvaDBImp : public KeyvaDB
+{
+private:
+ // These are stored in big endian format in the file.
+
+ // A file offset.
+ typedef int64 FileOffset;
+
+ // Index of a key.
+ //
+ // The value is broken up into two parts. The key block index,
+ // and a 1 based index within the keyblock corresponding to the
+ // internal key number.
+ //
+ typedef int32 KeyIndex;
+ typedef int32 KeyBlockIndex;
+
+ // Size of a value.
+ typedef uint32 ByteSize;
+
+private:
+ // returns the number of keys in a key block with the specified depth
+ static int calcKeysAtDepth (int depth)
+ {
+ return (1U << depth) - 1;
+ }
+
+ // returns the number of bytes in a key record
+ static int calcKeyRecordBytes (int keyBytes)
+ {
+ // This depends on the format of a serialized key record
+ return
+ sizeof (FileOffset) +
+ sizeof (ByteSize) +
+ sizeof (KeyIndex) +
+ sizeof (KeyIndex) +
+ keyBytes
+ ;
+ }
+
+ // returns the number of bytes in a key block
+ static int calcKeyBlockBytes (int depth, int keyBytes)
+ {
+ return calcKeysAtDepth (depth) * calcKeyRecordBytes (keyBytes);
+ }
+
+public:
+ enum
+ {
+ currentVersion = 1
+ };
+
+
+ //--------------------------------------------------------------------------
+
+ struct KeyAddress
+ {
+ // 1 based key block number
+ uint32 blockNumber;
+
+ // 1 based key index within the block, breadth-first left to right
+ uint32 keyNumber;
+ };
+
+ enum
+ {
+ // The size of the fixed area at the beginning of the key file.
+ // This is used to store some housekeeping information like the
+ // key size and version number.
+ //
+ masterHeaderBytes = 1000
+ };
+
+ // The master record is at the beginning of the key file
+ struct MasterRecord
+ {
+ // version number, starting from 1
+ int32 version;
+
+ KeyBlockIndex nextKeyBlockIndex;
+
+ void write (OutputStream& stream)
+ {
+ stream.writeTypeBigEndian (version);
+ }
+
+ void read (InputStream& stream)
+ {
+ stream.readTypeBigEndianInto (&version);
+ }
+ };
+
+ // Key records are indexed starting at one.
+ struct KeyRecord : Uncopyable
+ {
+ explicit KeyRecord (void* const keyStorage)
+ : key (keyStorage)
+ {
+ }
+
+ // Absolute byte FileOffset in the value file.
+ FileOffset valFileOffset;
+
+ // Size of the corresponding value, in bytes.
+ ByteSize valSize;
+
+ // Key record index of left node, or 0.
+ KeyIndex leftIndex;
+
+ // Key record index of right node, or 0.
+ KeyIndex rightIndex;
+
+ // Points to keyBytes storage of the key.
+ void* const key;
+ };
+
+ //--------------------------------------------------------------------------
+
+ // A complete keyblock. The contents of the memory for the key block
+ // are identical to the format on disk. Therefore it is necessary to
+ // use the serialization routines to extract or update the key records.
+ //
+ class KeyBlock : Uncopyable
+ {
+ public:
+ KeyBlock (int depth, int keyBytes)
+ : m_depth (depth)
+ , m_keyBytes (keyBytes)
+ , m_storage (calcKeyBlockBytes (depth, keyBytes))
+ {
+ }
+
+ void read (InputStream& stream)
+ {
+ stream.read (m_storage.getData (), calcKeyBlockBytes (m_depth, m_keyBytes));
+ }
+
+ void write (OutputStream& stream)
+ {
+ stream.write (m_storage.getData (), calcKeyBlockBytes (m_depth, m_keyBytes));
+ }
+
+ void readKeyRecord (KeyRecord* keyRecord, int keyIndex)
+ {
+ bassert (keyIndex >=1 && keyIndex <= calcKeysAtDepth (m_depth));
+
+ size_t const byteOffset = (keyIndex - 1) * calcKeyRecordBytes (m_keyBytes);
+
+ MemoryInputStream stream (
+ addBytesToPointer (m_storage.getData (), byteOffset),
+ calcKeyRecordBytes (m_keyBytes),
+ false);
+
+ stream.readTypeBigEndianInto (&keyRecord->valFileOffset);
+ stream.readTypeBigEndianInto (&keyRecord->valSize);
+ stream.readTypeBigEndianInto (&keyRecord->leftIndex);
+ stream.readTypeBigEndianInto (&keyRecord->rightIndex);
+ stream.read (keyRecord->key, m_keyBytes);
+ }
+
+#if 0
+ void writeKeyRecord (KeyRecord const& keyRecord, int keyIndex)
+ {
+ bassert (keyIndex >=1 && keyIndex <= calcKeysAtDepth (m_depth));
+
+#if 0
+ size_t const byteOffset = (keyIndex - 1) * calcKeyRecordBytes (m_keyBytes);
+
+ MemoryOutputStream stream (
+ addBytesToPointer (m_storage.getData (), byteOffset),
+ calcKeyRecordBytes (m_keyBytes));
+
+ stream.writeTypeBigEndian (keyRecord.valFileOffset);
+ stream.writeTypeBigEndian (keyRecord.valSize);
+ stream.writeTypeBigEndian (keyRecord.leftIndex);
+ stream.writeTypeBigEndian (keyRecord.rightIndex);
+ stream.write (keyRecord.key, m_keyBytes);
+#endif
+ }
+#endif
+
+ private:
+ int const m_depth;
+ int const m_keyBytes;
+ MemoryBlock m_storage;
+ };
+
+ //--------------------------------------------------------------------------
+
+ // Concurrent data
+ //
+ struct State
+ {
+ RandomAccessFile keyFile;
+ RandomAccessFile valFile;
+ MasterRecord masterRecord;
+ KeyIndex newKeyIndex;
+ FileOffset valFileSize;
+
+ bool hasKeys () const noexcept
+ {
+ return newKeyIndex > 1;
+ }
+ };
+
+ typedef SharedData <State> SharedState;
+
+ //--------------------------------------------------------------------------
+
+ int const m_keyBytes;
+ int const m_keyBlockDepth;
+ SharedState m_state;
+ HeapBlock <char> m_keyStorage;
+
+ //--------------------------------------------------------------------------
+
+ KeyvaDBImp (int keyBytes,
+ int keyBlockDepth,
+ File keyPath,
+ File valPath)
+ : m_keyBytes (keyBytes)
+ , m_keyBlockDepth (keyBlockDepth)
+ , m_keyStorage (keyBytes)
+ {
+ SharedState::WriteAccess state (m_state);
+
+ openFile (&state->keyFile, keyPath);
+
+ int64 const fileSize = state->keyFile.getFile ().getSize ();
+
+ if (fileSize == 0)
+ {
+ // VFALCO TODO Better error handling here
+ // initialize the key file
+ Result result = state->keyFile.setPosition (masterHeaderBytes - 1);
+ if (result.wasOk ())
+ {
+ char byte = 0;
+
+ result = state->keyFile.write (&byte, 1);
+
+ if (result.wasOk ())
+ {
+ state->keyFile.flush ();
+ }
+ }
+ }
+
+ state->newKeyIndex = 1 + (state->keyFile.getFile ().getSize () - masterHeaderBytes)
+ / calcKeyRecordBytes (m_keyBytes);
+
+ openFile (&state->valFile, valPath);
+
+ state->valFileSize = state->valFile.getFile ().getSize ();
+ }
+
+ ~KeyvaDBImp ()
+ {
+ SharedState::WriteAccess state (m_state);
+
+ flushInternal (state);
+ }
+
+ // Open a file for reading and writing.
+ // Creates the file if it doesn't exist.
+ static void openFile (RandomAccessFile* file, File path)
+ {
+ Result const result = file->open (path, RandomAccessFile::readWrite);
+
+ if (! result)
+ {
+ String s;
+ s << "KeyvaDB: Couldn't open " << path.getFileName () << " for writing.";
+ Throw (std::runtime_error (s.toStdString ()));
+ }
+ }
+
+ //--------------------------------------------------------------------------
+
+ Result createMasterRecord (SharedState::WriteAccess& state)
+ {
+ MemoryBlock buffer (masterHeaderBytes, true);
+
+ Result result = state->keyFile.setPosition (0);
+
+ if (result.wasOk ())
+ {
+ MasterRecord mr;
+
+ mr.version = 1;
+
+ result = state->keyFile.write (buffer.getData (), buffer.getSize ());
+ }
+
+ return result;
+ }
+
+ //--------------------------------------------------------------------------
+
+ FileOffset calcKeyRecordOffset (KeyIndex keyIndex)
+ {
+ bassert (keyIndex > 0);
+
+ FileOffset const byteOffset = masterHeaderBytes + (keyIndex - 1) * calcKeyRecordBytes (m_keyBytes);
+
+ return byteOffset;
+ }
+
+ // Read a key record into memory.
+ // VFALCO TODO Return a Result and do validity checking on all inputs
+ //
+ void readKeyRecord (KeyRecord* const keyRecord,
+ KeyIndex const keyIndex,
+ SharedState::WriteAccess& state)
+ {
+ FileOffset const byteOffset = calcKeyRecordOffset (keyIndex);
+
+ Result result = state->keyFile.setPosition (byteOffset);
+
+ if (result.wasOk ())
+ {
+ MemoryBlock data (calcKeyRecordBytes (m_keyBytes));
+
+ size_t bytesRead;
+
+ result = state->keyFile.read (data.getData (), calcKeyRecordBytes (m_keyBytes), &bytesRead);
+
+ if (result.wasOk ())
+ {
+ if (bytesRead == calcKeyRecordBytes (m_keyBytes))
+ {
+ MemoryInputStream stream (data, false);
+
+ // This defines the file format!
+ stream.readTypeBigEndianInto (&keyRecord->valFileOffset);
+ stream.readTypeBigEndianInto (&keyRecord->valSize);
+ stream.readTypeBigEndianInto (&keyRecord->leftIndex);
+ stream.readTypeBigEndianInto (&keyRecord->rightIndex);
+
+ // Grab the key
+ stream.read (keyRecord->key, m_keyBytes);
+ }
+ else
+ {
+ result = Result::fail ("KeyvaDB: amountRead != calcKeyRecordBytes()");
+ }
+ }
+ }
+
+ if (! result.wasOk ())
+ {
+ String s;
+ s << "KeyvaDB readKeyRecord failed in " << state->keyFile.getFile ().getFileName ();
+ Throw (std::runtime_error (s.toStdString ()));
+ }
+ }
+
+ // Write a key record from memory
+ void writeKeyRecord (KeyRecord const& keyRecord,
+ KeyIndex const keyIndex,
+ SharedState::WriteAccess& state,
+ bool includingKey)
+ {
+ FileOffset const byteOffset = calcKeyRecordOffset (keyIndex);
+
+ int const bytes = calcKeyRecordBytes (m_keyBytes) - (includingKey ? 0 : m_keyBytes);
+
+ // VFALCO TODO Recycle this buffer
+ MemoryBlock data (bytes);
+
+ {
+ MemoryOutputStream stream (data, false);
+
+ // This defines the file format!
+ stream.writeTypeBigEndian (keyRecord.valFileOffset);
+ stream.writeTypeBigEndian (keyRecord.valSize);
+ stream.writeTypeBigEndian (keyRecord.leftIndex);
+ stream.writeTypeBigEndian (keyRecord.rightIndex);
+
+ // Write the key
+ if (includingKey)
+ stream.write (keyRecord.key, m_keyBytes);
+ }
+
+ Result result = state->keyFile.setPosition (byteOffset);
+
+ if (result.wasOk ())
+ {
+ size_t bytesWritten;
+
+ result = state->keyFile.write (data.getData (), bytes, &bytesWritten);
+
+ if (result.wasOk ())
+ {
+ if (bytesWritten != bytes)
+ {
+ result = Result::fail ("KeyvaDB: bytesWritten != bytes");
+ }
+ }
+ }
+
+ if (!result.wasOk ())
+ {
+ String s;
+ s << "KeyvaDB: writeKeyRecord failed in " << state->keyFile.getFile ().getFileName ();
+ Throw (std::runtime_error (s.toStdString ()));
+ }
+ }
+
+ // Append a value to the value file.
+ // VFALCO TODO return a Result
+ void writeValue (void const* const value, ByteSize valueBytes, SharedState::WriteAccess& state)
+ {
+ Result result = state->valFile.setPosition (state->valFileSize);
+
+ if (result.wasOk ())
+ {
+ size_t bytesWritten;
+
+ result = state->valFile.write (value, valueBytes, &bytesWritten);
+
+ if (result.wasOk ())
+ {
+ if (bytesWritten == valueBytes)
+ {
+ state->valFileSize += valueBytes;
+ }
+ else
+ {
+ result = Result::fail ("KeyvaDB: bytesWritten != valueBytes");
+ }
+ }
+ }
+
+ if (! result.wasOk ())
+ {
+ String s;
+ s << "KeyvaDB: writeValue failed in " << state->valFile.getFile ().getFileName ();
+ Throw (std::runtime_error (s.toStdString ()));
+ }
+ }
+
+ //--------------------------------------------------------------------------
+
+ struct FindResult : Uncopyable
+ {
+ FindResult (void* const keyStorage)
+ : keyRecord (keyStorage)
+ {
+ }
+
+ int compare; // result of the last comparison
+ KeyIndex keyIndex; // index we looked at last
+ //KeyBlock keyBlock; // KeyBlock we looked at last
+ KeyRecord keyRecord; // KeyRecord we looked at last
+ };
+
+ // Find a key. If the key doesn't exist, enough information
+ // is left behind in the result to perform an insertion.
+ //
+ // Returns true if the key was found.
+ //
+ bool find (FindResult* findResult, void const* key, SharedState::WriteAccess& state)
+ {
+ // Not okay to call this with an empty key file!
+ bassert (state->hasKeys ());
+
+ // This performs a standard binary search
+
+ findResult->keyIndex = 1;
+
+ do
+ {
+ readKeyRecord (&findResult->keyRecord, findResult->keyIndex, state);
+
+ findResult->compare = memcmp (key, findResult->keyRecord.key, m_keyBytes);
+
+ if (findResult->compare < 0)
+ {
+ if (findResult->keyRecord.leftIndex != 0)
+ {
+ // Go left
+ findResult->keyIndex = findResult->keyRecord.leftIndex;
+ }
+ else
+ {
+ // Insert position is to the left
+ break;
+ }
+ }
+ else if (findResult->compare > 0)
+ {
+ if (findResult->keyRecord.rightIndex != 0)
+ {
+ // Go right
+ findResult->keyIndex = findResult->keyRecord.rightIndex;
+ }
+ else
+ {
+ // Insert position is to the right
+ break;
+ }
+ }
+ }
+ while (findResult->compare != 0);
+
+ return findResult->compare == 0;
+ }
+
+ //--------------------------------------------------------------------------
+
+ bool get (void const* key, GetCallback* callback)
+ {
+ FindResult findResult (m_keyStorage.getData ());
+
+ SharedState::WriteAccess state (m_state);
+
+ bool found = false;
+
+ if (state->hasKeys ())
+ {
+ found = find (&findResult, key, state);
+
+ if (found)
+ {
+ void* const destStorage = callback->getStorageForValue (findResult.keyRecord.valSize);
+
+ Result result = state->valFile.setPosition (findResult.keyRecord.valFileOffset);
+
+ if (result.wasOk ())
+ {
+ size_t bytesRead;
+
+ result = state->valFile.read (destStorage, findResult.keyRecord.valSize, &bytesRead);
+
+ if (result.wasOk ())
+ {
+ if (bytesRead != findResult.keyRecord.valSize)
+ {
+ result = Result::fail ("KeyvaDB: bytesRead != valSize");
+ }
+ }
+ }
+
+ if (! result.wasOk ())
+ {
+ String s;
+ s << "KeyvaDB: get in " << state->valFile.getFile ().getFileName ();
+ Throw (std::runtime_error (s.toStdString ()));
+ }
+ }
+ }
+
+ return found;
+ }
+
+ //--------------------------------------------------------------------------
+
+ // Write a key value pair. Does nothing if the key exists.
+ void put (void const* key, void const* value, int valueBytes)
+ {
+ bassert (valueBytes > 0);
+
+ SharedState::WriteAccess state (m_state);
+
+ if (state->hasKeys ())
+ {
+ // Search for the key
+
+ FindResult findResult (m_keyStorage.getData ());
+
+ bool const found = find (&findResult, key, state);
+
+ if (! found )
+ {
+ bassert (findResult.compare != 0);
+
+ // Binary tree insertion.
+ // Link the last key record to the new key
+ {
+ if (findResult.compare < 0)
+ {
+ findResult.keyRecord.leftIndex = state->newKeyIndex;
+ }
+ else
+ {
+ findResult.keyRecord.rightIndex = state->newKeyIndex;
+ }
+
+ writeKeyRecord (findResult.keyRecord, findResult.keyIndex, state, false);
+ }
+
+ // Write the new key
+ {
+ findResult.keyRecord.valFileOffset = state->valFileSize;
+ findResult.keyRecord.valSize = valueBytes;
+ findResult.keyRecord.leftIndex = 0;
+ findResult.keyRecord.rightIndex = 0;
+
+ memcpy (findResult.keyRecord.key, key, m_keyBytes);
+
+ writeKeyRecord (findResult.keyRecord, state->newKeyIndex, state, true);
+ }
+
+ // Key file has grown by one.
+ ++state->newKeyIndex;
+
+ // Write the value
+ writeValue (value, valueBytes, state);
+ }
+ else
+ {
+ // Key already exists, do nothing.
+ // We could check to make sure the payloads are the same.
+ }
+ }
+ else
+ {
+ //
+ // Write first key
+ //
+
+ KeyRecord keyRecord (m_keyStorage.getData ());
+
+ keyRecord.valFileOffset = state->valFileSize;
+ keyRecord.valSize = valueBytes;
+ keyRecord.leftIndex = 0;
+ keyRecord.rightIndex = 0;
+
+ memcpy (keyRecord.key, key, m_keyBytes);
+
+ writeKeyRecord (keyRecord, state->newKeyIndex, state, true);
+
+ // Key file has grown by one.
+ ++state->newKeyIndex;
+
+ //
+ // Write value
+ //
+
+ bassert (state->valFileSize == 0);
+
+ writeValue (value, valueBytes, state);
+ }
+ }
+
+ //--------------------------------------------------------------------------
+
+ void flush ()
+ {
+ SharedState::WriteAccess state (m_state);
+
+ flushInternal (state);
+ }
+
+ void flushInternal (SharedState::WriteAccess& state)
+ {
+ state->keyFile.flush ();
+ state->valFile.flush ();
+ }
+};
+
+KeyvaDB* KeyvaDB::New (int keyBytes, int keyBlockDepth, File keyPath, File valPath)
+{
+ return new KeyvaDBImp (keyBytes, keyBlockDepth, keyPath, valPath);
+}
+
+//------------------------------------------------------------------------------
+
+class KeyvaDBTests : public UnitTest
+{
+public:
+ enum
+ {
+ maxPayloadBytes = 8 * 1024
+ };
+
+ KeyvaDBTests () : UnitTest ("KeyvaDB", "ripple")
+ {
+ }
+
+ // Retrieval callback stores the value in a Payload object for comparison
+ struct PayloadGetCallback : KeyvaDB::GetCallback
+ {
+ UnitTestUtilities::Payload payload;
+
+ PayloadGetCallback () : payload (maxPayloadBytes)
+ {
+ }
+
+ void* getStorageForValue (int valueBytes)
+ {
+ bassert (valueBytes <= maxPayloadBytes);
+
+ payload.bytes = valueBytes;
+
+ return payload.data.getData ();
+ }
+ };
+
+ KeyvaDB* createDB (unsigned int keyBytes, File const& path)
+ {
+ File const keyPath = path.withFileExtension (".key");
+ File const valPath = path.withFileExtension (".val");
+
+ return KeyvaDB::New (keyBytes, 1, keyPath, valPath);
+ }
+
+ void deleteDBFiles (File const& path)
+ {
+ File const keyPath = path.withFileExtension (".key");
+ File const valPath = path.withFileExtension (".val");
+
+ keyPath.deleteFile ();
+ valPath.deleteFile ();
+ }
+
+ template <unsigned int KeyBytes>
+ void testKeySize (unsigned int const maxItems)
+ {
+ using namespace UnitTestUtilities;
+
+ typedef UnsignedInteger <KeyBytes> KeyType;
+
+ int64 const seedValue = 50;
+
+ String s;
+
+ s << "keyBytes=" << String (uint64(KeyBytes)) << ", maxItems=" << String (maxItems);
+ beginTest (s);
+
+ // Set up the key and value files
+ File const path (File::createTempFile (""));
+
+ {
+ // open the db
+ ScopedPointer <KeyvaDB> db (createDB (KeyBytes, path));
+
+ Payload payload (maxPayloadBytes);
+ Payload check (maxPayloadBytes);
+
+ {
+ // Create an array of ascending integers.
+ HeapBlock <unsigned int> items (maxItems);
+ for (unsigned int i = 0; i < maxItems; ++i)
+ items [i] = i;
+
+ // Now shuffle it deterministically.
+ repeatableShuffle (maxItems, items, seedValue);
+
+ // Write all the keys of integers.
+ for (unsigned int i = 0; i < maxItems; ++i)
+ {
+ unsigned int keyIndex = items [i];
+
+ KeyType const key = KeyType::createFromInteger (keyIndex);
+
+ payload.repeatableRandomFill (1, maxPayloadBytes, keyIndex + seedValue);
+
+ db->put (key.cbegin (), payload.data.getData (), payload.bytes);
+
+ {
+ // VFALCO TODO Check what we just wrote?
+ //db->get (key.cbegin (), check.data.getData (), payload.bytes);
+ }
+ }
+ }
+
+ {
+ // Go through all of our keys and try to retrieve them.
+ // since this is done in ascending order, we should get
+ // random seeks at this point.
+ //
+ PayloadGetCallback cb;
+ for (unsigned int keyIndex = 0; keyIndex < maxItems; ++keyIndex)
+ {
+ KeyType const v = KeyType::createFromInteger (keyIndex);
+
+ bool const found = db->get (v.cbegin (), &cb);
+
+ expect (found, "Should be found");
+
+ if (found)
+ {
+ payload.repeatableRandomFill (1, maxPayloadBytes, keyIndex + seedValue);
+
+ expect (payload == cb.payload, "Should be equal");
+ }
+ }
+ }
+ }
+
+ {
+ // Re-open the database and confirm the data
+ ScopedPointer <KeyvaDB> db (createDB (KeyBytes, path));
+
+ Payload payload (maxPayloadBytes);
+ Payload check (maxPayloadBytes);
+
+ PayloadGetCallback cb;
+ for (unsigned int keyIndex = 0; keyIndex < maxItems; ++keyIndex)
+ {
+ KeyType const v = KeyType::createFromInteger (keyIndex);
+
+ bool const found = db->get (v.cbegin (), &cb);
+
+ expect (found, "Should be found");
+
+ if (found)
+ {
+ payload.repeatableRandomFill (1, maxPayloadBytes, keyIndex + seedValue);
+
+ expect (payload == cb.payload, "Should be equal");
+ }
+ }
+ }
+
+ deleteDBFiles (path);
+ }
+
+ void runTest ()
+ {
+ testKeySize <4> (500);
+ testKeySize <32> (4000);
+ }
+};
+
+static KeyvaDBTests keyvaDBTests;
diff --git a/Subtrees/beast/modules/beast_db/keyvalue/beast_KeyvaDB.h b/Subtrees/beast/modules/beast_db/keyvalue/beast_KeyvaDB.h
new file mode 100644
index 0000000000..20e4185f49
--- /dev/null
+++ b/Subtrees/beast/modules/beast_db/keyvalue/beast_KeyvaDB.h
@@ -0,0 +1,55 @@
+//------------------------------------------------------------------------------
+/*
+ This file is part of Beast: https://github.com/vinniefalco/Beast
+ Copyright 2013, Vinnie Falco
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+//==============================================================================
+
+#ifndef BEAST_KEYVADB_H_INCLUDED
+#define BEAST_KEYVADB_H_INCLUDED
+
+/** Specialized Key/value database
+
+ Once written, a value can never be modified.
+*/
+class KeyvaDB : LeakChecked <KeyvaDB>
+{
+public:
+ class GetCallback
+ {
+ public:
+ virtual void* getStorageForValue (int valueBytes) = 0;
+ };
+
+ static KeyvaDB* New (int keyBytes,
+ int keyBlockDepth,
+ File keyPath,
+ File valPath);
+
+ virtual ~KeyvaDB () { }
+
+ // VFALCO TODO Make the return value a Result so we can
+ // detect corruption and errors!
+ //
+ virtual bool get (void const* key, GetCallback* callback) = 0;
+
+ // VFALCO TODO Use Result for return value
+ //
+ virtual void put (void const* key, void const* value, int valueBytes) = 0;
+
+ virtual void flush () = 0;
+};
+
+#endif
diff --git a/libraries/liblmdb/.gitignore b/Subtrees/mdb/libraries/liblmdb/.gitignore
similarity index 100%
rename from libraries/liblmdb/.gitignore
rename to Subtrees/mdb/libraries/liblmdb/.gitignore
diff --git a/libraries/liblmdb/COPYRIGHT b/Subtrees/mdb/libraries/liblmdb/COPYRIGHT
similarity index 100%
rename from libraries/liblmdb/COPYRIGHT
rename to Subtrees/mdb/libraries/liblmdb/COPYRIGHT
diff --git a/libraries/liblmdb/Doxyfile b/Subtrees/mdb/libraries/liblmdb/Doxyfile
similarity index 100%
rename from libraries/liblmdb/Doxyfile
rename to Subtrees/mdb/libraries/liblmdb/Doxyfile
diff --git a/libraries/liblmdb/LICENSE b/Subtrees/mdb/libraries/liblmdb/LICENSE
similarity index 100%
rename from libraries/liblmdb/LICENSE
rename to Subtrees/mdb/libraries/liblmdb/LICENSE
diff --git a/libraries/liblmdb/Makefile b/Subtrees/mdb/libraries/liblmdb/Makefile
similarity index 87%
rename from libraries/liblmdb/Makefile
rename to Subtrees/mdb/libraries/liblmdb/Makefile
index 25c52ada8e..8255d8b438 100644
--- a/libraries/liblmdb/Makefile
+++ b/Subtrees/mdb/libraries/liblmdb/Makefile
@@ -3,7 +3,9 @@
########################################################################
# Configuration. The compiler options must enable threaded compilation.
#
-# Preprocessor macros (for CPPFLAGS) of interest:
+# Preprocessor macros (for CPPFLAGS) of interest...
+# Note that the defaults should already be correct for most
+# platforms; you should not need to change any of these:
#
# To compile successfully if the default does not:
# - MDB_USE_POSIX_SEM (enabled by default on BSD, Apple)
@@ -11,7 +13,7 @@
# semaphores and shared mutexes have different behaviors and
# different problems, see the Caveats section in lmdb.h.
#
-# For best performence or to compile successfully:
+# For best performance or to compile successfully:
# - MDB_DSYNC = "O_DSYNC" (default) or "O_SYNC" (less efficient)
# If O_DSYNC is undefined but exists in /usr/include,
# preferably set some compiler flag to get the definition.
@@ -25,14 +27,13 @@
# Data format:
# - MDB_MAXKEYSIZE
# Controls data packing and limits, see mdb.c.
-#
-# Debugging:
-# - MDB_DEBUG, MDB_PARANOID.
+# You might need to change this if the default size is too small.
#
CC = gcc
W = -W -Wall -Wno-unused-parameter -Wbad-function-cast
+THREADS = -pthread
OPT = -O2 -g
-CFLAGS = -pthread $(OPT) $(W) $(XCFLAGS)
+CFLAGS = $(THREADS) $(OPT) $(W) $(XCFLAGS)
LDLIBS =
SOLIBS =
prefix = /usr/local
diff --git a/libraries/liblmdb/lmdb.h b/Subtrees/mdb/libraries/liblmdb/lmdb.h
similarity index 97%
rename from libraries/liblmdb/lmdb.h
rename to Subtrees/mdb/libraries/liblmdb/lmdb.h
index 9f00a04202..b3cd5ef79e 100644
--- a/libraries/liblmdb/lmdb.h
+++ b/Subtrees/mdb/libraries/liblmdb/lmdb.h
@@ -166,7 +166,7 @@ typedef int mdb_filehandle_t;
/** Library minor version */
#define MDB_VERSION_MINOR 9
/** Library patch version */
-#define MDB_VERSION_PATCH 6
+#define MDB_VERSION_PATCH 7
/** Combine args a,b,c into a single integer for easy version comparisons */
#define MDB_VERINT(a,b,c) (((a) << 24) | ((b) << 16) | (c))
@@ -889,6 +889,15 @@ int mdb_dbi_open(MDB_txn *txn, const char *name, unsigned int flags, MDB_dbi *d
*/
int mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *stat);
+ /** @brief Retrieve the DB flags for a database handle.
+ *
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ * @param[in] dbi A database handle returned by #mdb_dbi_open()
+ * @param[out] flags Address where the flags will be returned.
+ * @return A non-zero error value on failure and 0 on success.
+ */
+int mdb_dbi_flags(MDB_env *env, MDB_dbi dbi, unsigned int *flags);
+
/** @brief Close a database handle.
*
* This call is not mutex protected. Handles should only be closed by
@@ -1289,6 +1298,31 @@ int mdb_cmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b);
* @return < 0 if a < b, 0 if a == b, > 0 if a > b
*/
int mdb_dcmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b);
+
+ /** @brief A callback function used to print a message from the library.
+ *
+ * @param[in] msg The string to be printed.
+ * @param[in] ctx An arbitrary context pointer for the callback.
+ * @return < 0 on failure, 0 on success.
+ */
+typedef int (MDB_msg_func)(const char *msg, void *ctx);
+
+ /** @brief Dump the entries in the reader lock table.
+ *
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ * @param[in] func A #MDB_msg_func function
+ * @param[in] ctx Anything the message function needs
+ * @return < 0 on failure, 0 on success.
+ */
+int mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx);
+
+ /** @brief Check for stale entries in the reader lock table.
+ *
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ * @param[out] dead Number of stale slots that were cleared
+ * @return 0 on success, non-zero on failure.
+ */
+int mdb_reader_check(MDB_env *env, int *dead);
/** @} */
#ifdef __cplusplus
diff --git a/libraries/liblmdb/mdb.c b/Subtrees/mdb/libraries/liblmdb/mdb.c
similarity index 91%
rename from libraries/liblmdb/mdb.c
rename to Subtrees/mdb/libraries/liblmdb/mdb.c
index 620e5b51ff..4d686007ba 100644
--- a/libraries/liblmdb/mdb.c
+++ b/Subtrees/mdb/libraries/liblmdb/mdb.c
@@ -344,8 +344,10 @@ static txnid_t mdb_debug_start;
*/
#define MDB_MAGIC 0xBEEFC0DE
- /** The version number for a database's file format. */
-#define MDB_VERSION 1
+ /** The version number for a database's datafile format. */
+#define MDB_DATA_VERSION 1
+ /** The version number for a database's lockfile format. */
+#define MDB_LOCK_VERSION 1
/** @brief The maximum size of a key in the database.
*
@@ -513,7 +515,7 @@ typedef struct MDB_txbody {
/** Stamp identifying this as an MDB file. It must be set
* to #MDB_MAGIC. */
uint32_t mtb_magic;
- /** Version number of this lock file. Must be set to #MDB_VERSION. */
+ /** Version number of this lock file. Must be set to #MDB_LOCK_VERSION. */
uint32_t mtb_version;
#if defined(_WIN32) || defined(MDB_USE_POSIX_SEM)
char mtb_rmname[MNAME_LEN];
@@ -585,6 +587,7 @@ typedef struct MDB_page {
#define P_DIRTY 0x10 /**< dirty page */
#define P_LEAF2 0x20 /**< for #MDB_DUPFIXED records */
#define P_SUBP 0x40 /**< for #MDB_DUPSORT sub-pages */
+#define P_KEEP 0x8000 /**< leave this page alone during spill */
/** @} */
uint16_t mp_flags; /**< @ref mdb_page */
#define mp_lower mp_pb.pb.pb_lower
@@ -769,7 +772,7 @@ typedef struct MDB_meta {
/** Stamp identifying this as an MDB file. It must be set
* to #MDB_MAGIC. */
uint32_t mm_magic;
- /** Version number of this lock file. Must be set to #MDB_VERSION. */
+ /** Version number of this lock file. Must be set to #MDB_DATA_VERSION. */
uint32_t mm_version;
void *mm_address; /**< address for fixed mapping */
size_t mm_mapsize; /**< size of mmap region */
@@ -824,6 +827,10 @@ struct MDB_txn {
/** The list of pages that became unused during this transaction.
*/
MDB_IDL mt_free_pgs;
+ /** The list of dirty pages we temporarily wrote to disk
+ * because the dirty list was full.
+ */
+ MDB_IDL mt_spill_pgs;
union {
MDB_ID2L dirty_list; /**< for write txns: modified pages */
MDB_reader *reader; /**< this thread's reader table slot or NULL */
@@ -857,6 +864,7 @@ struct MDB_txn {
#define MDB_TXN_RDONLY 0x01 /**< read-only transaction */
#define MDB_TXN_ERROR 0x02 /**< an error has occurred */
#define MDB_TXN_DIRTY 0x04 /**< must write, even if dirty list is empty */
+#define MDB_TXN_SPILLS 0x08 /**< txn or a parent has spilled pages */
/** @} */
unsigned int mt_flags; /**< @ref mdb_txn */
/** dirty_list maxsize - # of allocated pages allowed, including in parent txns */
@@ -944,6 +952,8 @@ struct MDB_env {
#define MDB_ENV_ACTIVE 0x20000000U
/** me_txkey is set */
#define MDB_ENV_TXKEY 0x10000000U
+ /** Have liveness lock in reader table */
+#define MDB_LIVE_READER 0x08000000U
uint32_t me_flags; /**< @ref mdb_env */
unsigned int me_psize; /**< size of a page, from #GET_PAGESIZE */
unsigned int me_maxreaders; /**< size of the reader table */
@@ -975,6 +985,7 @@ struct MDB_env {
/** Max size of a node on a page */
unsigned int me_nodemax;
#ifdef _WIN32
+ int me_pidquery; /**< Used in OpenProcess */
HANDLE me_rmutex; /* Windows mutexes don't reside in shared mem */
HANDLE me_wmutex;
#elif defined(MDB_USE_POSIX_SEM)
@@ -1306,7 +1317,7 @@ mdb_dpage_free(MDB_env *env, MDB_page *dp)
}
}
-/* Return all dirty pages to dpage list */
+/** Return all dirty pages to dpage list */
static void
mdb_dlist_free(MDB_txn *txn)
{
@@ -1320,6 +1331,183 @@ mdb_dlist_free(MDB_txn *txn)
dl[0].mid = 0;
}
+/* Set or clear P_KEEP in non-overflow, non-sub pages in known cursors.
+ * When clearing, only consider backup cursors (from parent txns) since
+ * other P_KEEP flags have already been cleared.
+ * @param[in] mc A cursor handle for the current operation.
+ * @param[in] pflags Flags of the pages to update:
+ * P_DIRTY to set P_KEEP, P_DIRTY|P_KEEP to clear it.
+ */
+static void
+mdb_cursorpages_mark(MDB_cursor *mc, unsigned pflags)
+{
+ MDB_txn *txn = mc->mc_txn;
+ MDB_cursor *m2, *m3;
+ MDB_xcursor *mx;
+ unsigned i, j;
+
+ if (mc->mc_flags & C_UNTRACK)
+ mc = NULL; /* will find mc in mt_cursors */
+ for (i = txn->mt_numdbs;; mc = txn->mt_cursors[--i]) {
+ for (; mc; mc=mc->mc_next) {
+ m2 = pflags == P_DIRTY ? mc : mc->mc_backup;
+ for (; m2; m2 = m2->mc_backup) {
+ for (m3=m2; m3->mc_flags & C_INITIALIZED; m3=&mx->mx_cursor) {
+ for (j=0; jmc_snum; j++)
+ if ((m3->mc_pg[j]->mp_flags & (P_SUBP|P_DIRTY|P_KEEP))
+ == pflags)
+ m3->mc_pg[j]->mp_flags ^= P_KEEP;
+ if (!(m3->mc_db->md_flags & MDB_DUPSORT))
+ break;
+ /* Cursor backups have mx malloced at the end of m2 */
+ mx = (m3 == mc ? m3->mc_xcursor : (MDB_xcursor *)(m3+1));
+ }
+ }
+ }
+ if (i == 0)
+ break;
+ }
+}
+
+static int mdb_page_flush(MDB_txn *txn);
+
+/** Spill pages from the dirty list back to disk.
+ * This is intended to prevent running into #MDB_TXN_FULL situations,
+ * but note that they may still occur in a few cases:
+ * 1) pages in #MDB_DUPSORT sub-DBs are never spilled, so if there
+ * are too many of these dirtied in one txn, the txn may still get
+ * too full.
+ * 2) child txns may run out of space if their parents dirtied a
+ * lot of pages and never spilled them. TODO: we probably should do
+ * a preemptive spill during #mdb_txn_begin() of a child txn, if
+ * the parent's dirty_room is below a given threshold.
+ * 3) our estimate of the txn size could be too small. At the
+ * moment this seems unlikely.
+ *
+ * Otherwise, if not using nested txns, it is expected that apps will
+ * not run into #MDB_TXN_FULL any more. The pages are flushed to disk
+ * the same way as for a txn commit, e.g. their P_DIRTY flag is cleared.
+ * If the txn never references them again, they can be left alone.
+ * If the txn only reads them, they can be used without any fuss.
+ * If the txn writes them again, they can be dirtied immediately without
+ * going thru all of the work of #mdb_page_touch(). Such references are
+ * handled by #mdb_page_unspill().
+ *
+ * Also note, we never spill DB root pages, nor pages of active cursors,
+ * because we'll need these back again soon anyway. And in nested txns,
+ * we can't spill a page in a child txn if it was already spilled in a
+ * parent txn. That would alter the parent txns' data even though
+ * the child hasn't committed yet, and we'd have no way to undo it if
+ * the child aborted.
+ *
+ * @param[in] m0 cursor A cursor handle identifying the transaction and
+ * database for which we are checking space.
+ * @param[in] key For a put operation, the key being stored.
+ * @param[in] data For a put operation, the data being stored.
+ * @return 0 on success, non-zero on failure.
+ */
+static int
+mdb_page_spill(MDB_cursor *m0, MDB_val *key, MDB_val *data)
+{
+ MDB_txn *txn = m0->mc_txn;
+ MDB_page *dp;
+ MDB_ID2L dl = txn->mt_u.dirty_list;
+ unsigned int i, j;
+ int rc;
+
+ if (m0->mc_flags & C_SUB)
+ return MDB_SUCCESS;
+
+ /* Estimate how much space this op will take */
+ i = m0->mc_db->md_depth;
+ /* Named DBs also dirty the main DB */
+ if (m0->mc_dbi > MAIN_DBI)
+ i += txn->mt_dbs[MAIN_DBI].md_depth;
+ /* For puts, roughly factor in the key+data size */
+ if (key)
+ i += (LEAFSIZE(key, data) + txn->mt_env->me_psize) / txn->mt_env->me_psize;
+ i += i; /* double it for good measure */
+
+ if (txn->mt_dirty_room > i)
+ return MDB_SUCCESS;
+
+ if (!txn->mt_spill_pgs) {
+ txn->mt_spill_pgs = mdb_midl_alloc(MDB_IDL_UM_MAX);
+ if (!txn->mt_spill_pgs)
+ return ENOMEM;
+ }
+
+ /* Mark all the dirty root pages we want to preserve */
+ for (i=0; imt_numdbs; i++) {
+ if (txn->mt_dbflags[i] & DB_DIRTY) {
+ j = mdb_mid2l_search(dl, txn->mt_dbs[i].md_root);
+ if (j <= dl[0].mid) {
+ dp = dl[j].mptr;
+ dp->mp_flags |= P_KEEP;
+ }
+ }
+ }
+
+ /* Preserve pages used by cursors */
+ mdb_cursorpages_mark(m0, P_DIRTY);
+
+ /* Save the page IDs of all the pages we're flushing */
+ for (i=1; i<=dl[0].mid; i++) {
+ dp = dl[i].mptr;
+ if (dp->mp_flags & P_KEEP)
+ continue;
+ /* Can't spill twice, make sure it's not already in a parent's
+ * spill list.
+ */
+ if (txn->mt_parent) {
+ MDB_txn *tx2;
+ for (tx2 = txn->mt_parent; tx2; tx2 = tx2->mt_parent) {
+ if (tx2->mt_spill_pgs) {
+ j = mdb_midl_search(tx2->mt_spill_pgs, dl[i].mid);
+ if (j <= tx2->mt_spill_pgs[0] && tx2->mt_spill_pgs[j] == dl[i].mid) {
+ dp->mp_flags |= P_KEEP;
+ break;
+ }
+ }
+ }
+ if (tx2)
+ continue;
+ }
+ if ((rc = mdb_midl_append(&txn->mt_spill_pgs, dl[i].mid)))
+ return rc;
+ }
+ mdb_midl_sort(txn->mt_spill_pgs);
+
+ rc = mdb_page_flush(txn);
+
+ mdb_cursorpages_mark(m0, P_DIRTY|P_KEEP);
+
+ if (rc == 0) {
+ if (txn->mt_parent) {
+ MDB_txn *tx2;
+ pgno_t pgno = dl[i].mid;
+ txn->mt_dirty_room = txn->mt_parent->mt_dirty_room - dl[0].mid;
+ /* dirty pages that are dirty in an ancestor don't
+ * count against this txn's dirty_room.
+ */
+ for (i=1; i<=dl[0].mid; i++) {
+ for (tx2 = txn->mt_parent; tx2; tx2 = tx2->mt_parent) {
+ j = mdb_mid2l_search(tx2->mt_u.dirty_list, pgno);
+ if (j <= tx2->mt_u.dirty_list[0].mid &&
+ tx2->mt_u.dirty_list[j].mid == pgno) {
+ txn->mt_dirty_room++;
+ break;
+ }
+ }
+ }
+ } else {
+ txn->mt_dirty_room = MDB_IDL_UM_MAX - dl[0].mid;
+ }
+ txn->mt_flags |= MDB_TXN_SPILLS;
+ }
+ return rc;
+}
+
/** Find oldest txnid still referenced. Expects txn->mt_txnid > 0. */
static txnid_t
mdb_find_oldest(MDB_txn *txn)
@@ -1337,6 +1525,24 @@ mdb_find_oldest(MDB_txn *txn)
return oldest;
}
+/** Add a page to the txn's dirty list */
+static void
+mdb_page_dirty(MDB_txn *txn, MDB_page *mp)
+{
+ MDB_ID2 mid;
+ int (*insert)(MDB_ID2L, MDB_ID2 *);
+
+ if (txn->mt_env->me_flags & MDB_WRITEMAP) {
+ insert = mdb_mid2l_append;
+ } else {
+ insert = mdb_mid2l_insert;
+ }
+ mid.mid = mp->mp_pgno;
+ mid.mptr = mp;
+ insert(txn->mt_u.dirty_list, &mid);
+ txn->mt_dirty_room--;
+}
+
/** Allocate pages for writing.
* If there are free pages available from older transactions, they
* will be re-used first. Otherwise a new page will be allocated.
@@ -1367,11 +1573,9 @@ mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp)
pgno_t pgno, *mop = env->me_pghead;
unsigned i, j, k, mop_len = mop ? mop[0] : 0;
MDB_page *np;
- MDB_ID2 mid;
txnid_t oldest = 0, last;
MDB_cursor_op op;
MDB_cursor m2;
- int (*insert)(MDB_ID2L, MDB_ID2 *);
*mp = NULL;
@@ -1474,11 +1678,9 @@ mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp)
search_done:
if (env->me_flags & MDB_WRITEMAP) {
np = (MDB_page *)(env->me_map + env->me_psize * pgno);
- insert = mdb_mid2l_append;
} else {
if (!(np = mdb_page_malloc(txn, num)))
return ENOMEM;
- insert = mdb_mid2l_insert;
}
if (i) {
mop[0] = mop_len -= num;
@@ -1488,10 +1690,8 @@ search_done:
} else {
txn->mt_next_pgno = pgno + num;
}
- mid.mid = np->mp_pgno = pgno;
- mid.mptr = np;
- insert(txn->mt_u.dirty_list, &mid);
- txn->mt_dirty_room--;
+ np->mp_pgno = pgno;
+ mdb_page_dirty(txn, np);
*mp = np;
return MDB_SUCCESS;
@@ -1521,6 +1721,77 @@ mdb_page_copy(MDB_page *dst, MDB_page *src, unsigned int psize)
}
}
+/** Pull a page off the txn's spill list, if present.
+ * If a page being referenced was spilled to disk in this txn, bring
+ * it back and make it dirty/writable again.
+ * @param[in] tx0 the transaction handle.
+ * @param[in] mp the page being referenced.
+ * @param[out] ret the writable page, if any. ret is unchanged if
+ * mp wasn't spilled.
+ */
+static int
+mdb_page_unspill(MDB_txn *tx0, MDB_page *mp, MDB_page **ret)
+{
+ MDB_env *env = tx0->mt_env;
+ MDB_txn *txn;
+ unsigned x;
+ pgno_t pgno = mp->mp_pgno;
+
+ for (txn = tx0; txn; txn=txn->mt_parent) {
+ if (!txn->mt_spill_pgs)
+ continue;
+ x = mdb_midl_search(txn->mt_spill_pgs, pgno);
+ if (x <= txn->mt_spill_pgs[0] && txn->mt_spill_pgs[x] == pgno) {
+ MDB_page *np;
+ int num;
+ if (IS_OVERFLOW(mp))
+ num = mp->mp_pages;
+ else
+ num = 1;
+ if (env->me_flags & MDB_WRITEMAP) {
+ np = mp;
+ } else {
+ np = mdb_page_malloc(txn, num);
+ if (!np)
+ return ENOMEM;
+ if (num > 1)
+ memcpy(np, mp, num * env->me_psize);
+ else
+ mdb_page_copy(np, mp, env->me_psize);
+ }
+ if (txn == tx0) {
+ /* If in current txn, this page is no longer spilled */
+ for (; x < txn->mt_spill_pgs[0]; x++)
+ txn->mt_spill_pgs[x] = txn->mt_spill_pgs[x+1];
+ txn->mt_spill_pgs[0]--;
+ } /* otherwise, if belonging to a parent txn, the
+ * page remains spilled until child commits
+ */
+
+ if (txn->mt_parent) {
+ MDB_txn *tx2;
+ /* If this page is also in a parent's dirty list, then
+ * it's already accounted in dirty_room, and we need to
+ * cancel out the decrement that mdb_page_dirty does.
+ */
+ for (tx2 = txn->mt_parent; tx2; tx2 = tx2->mt_parent) {
+ x = mdb_mid2l_search(tx2->mt_u.dirty_list, pgno);
+ if (x <= tx2->mt_u.dirty_list[0].mid &&
+ tx2->mt_u.dirty_list[x].mid == pgno) {
+ txn->mt_dirty_room++;
+ break;
+ }
+ }
+ }
+ mdb_page_dirty(tx0, np);
+ np->mp_flags |= P_DIRTY;
+ *ret = np;
+ break;
+ }
+ }
+ return MDB_SUCCESS;
+}
+
/** Touch a page: make it dirty and re-insert into tree with updated pgno.
* @param[in] mc cursor pointing to the page to be touched
* @return 0 on success, non-zero on failure.
@@ -1536,6 +1807,14 @@ mdb_page_touch(MDB_cursor *mc)
int rc;
if (!F_ISSET(mp->mp_flags, P_DIRTY)) {
+ if (txn->mt_flags & MDB_TXN_SPILLS) {
+ np = NULL;
+ rc = mdb_page_unspill(txn, mp, &np);
+ if (rc)
+ return rc;
+ if (np)
+ goto done;
+ }
if ((rc = mdb_midl_need(&txn->mt_free_pgs, 1)) ||
(rc = mdb_page_alloc(mc, 1, &np)))
return rc;
@@ -1583,6 +1862,7 @@ mdb_page_touch(MDB_cursor *mc)
np->mp_pgno = pgno;
np->mp_flags |= P_DIRTY;
+done:
/* Adjust cursors pointing to mp */
mc->mc_pg[mc->mc_top] = np;
dbi = mc->mc_dbi;
@@ -1717,6 +1997,56 @@ mdb_cursors_close(MDB_txn *txn, unsigned merge)
static void
mdb_txn_reset0(MDB_txn *txn, const char *act);
+#ifdef _WIN32
+enum Pidlock_op {
+ Pidset, Pidcheck
+};
+#else
+enum Pidlock_op {
+ Pidset = F_SETLK, Pidcheck = F_GETLK
+};
+#endif
+
+/** Set or check a pid lock. Set returns 0 on success.
+ * Check returns 0 if lock exists (meaning the process is alive).
+ *
+ * On Windows Pidset is a no-op, we merely check for the existence
+ * of the process with the given pid. On POSIX we use a single byte
+ * lock on the lockfile, set at an offset equal to the pid.
+ */
+static int
+mdb_reader_pid(MDB_env *env, enum Pidlock_op op, pid_t pid)
+{
+#ifdef _WIN32
+ HANDLE h;
+ int ver, query;
+ switch(op) {
+ case Pidset:
+ break;
+ case Pidcheck:
+ h = OpenProcess(env->me_pidquery, FALSE, pid);
+ if (!h)
+ return GetLastError();
+ CloseHandle(h);
+ break;
+ }
+ return 0;
+#else
+ int rc;
+ struct flock lock_info;
+ memset((void *)&lock_info, 0, sizeof(lock_info));
+ lock_info.l_type = F_WRLCK;
+ lock_info.l_whence = SEEK_SET;
+ lock_info.l_start = pid;
+ lock_info.l_len = 1;
+ while ((rc = fcntl(env->me_lfd, op, &lock_info)) &&
+ (rc = ErrCode()) == EINTR) ;
+ if (op == F_GETLK && rc == 0 && lock_info.l_type == F_UNLCK)
+ rc = -1;
+ return rc;
+#endif
+}
+
/** Common code for #mdb_txn_begin() and #mdb_txn_renew().
* @param[in] txn the transaction handle to initialize
* @return 0 on success, non-zero on failure.
@@ -1748,6 +2078,15 @@ mdb_txn_renew0(MDB_txn *txn)
pid_t pid = env->me_pid;
pthread_t tid = pthread_self();
+ if (!(env->me_flags & MDB_LIVE_READER)) {
+ rc = mdb_reader_pid(env, Pidset, pid);
+ if (rc) {
+ UNLOCK_MUTEX_R(env);
+ return rc;
+ }
+ env->me_flags |= MDB_LIVE_READER;
+ }
+
LOCK_MUTEX_R(env);
for (i=0; ime_txns->mti_numreaders; i++)
if (env->me_txns->mti_readers[i].mr_pid == 0)
@@ -1789,6 +2128,7 @@ mdb_txn_renew0(MDB_txn *txn)
txn->mt_u.dirty_list[0].mid = 0;
txn->mt_free_pgs = env->me_free_pgs;
txn->mt_free_pgs[0] = 0;
+ txn->mt_spill_pgs = NULL;
env->me_txn = txn;
}
@@ -1894,6 +2234,7 @@ mdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags, MDB_txn **ret)
txn->mt_toggle = parent->mt_toggle;
txn->mt_dirty_room = parent->mt_dirty_room;
txn->mt_u.dirty_list[0].mid = 0;
+ txn->mt_spill_pgs = NULL;
txn->mt_next_pgno = parent->mt_next_pgno;
parent->mt_child = txn;
txn->mt_parent = parent;
@@ -1996,6 +2337,7 @@ mdb_txn_reset0(MDB_txn *txn, const char *act)
txn->mt_parent->mt_child = NULL;
env->me_pgstate = ((MDB_ntxn *)txn)->mnt_pgstate;
mdb_midl_free(txn->mt_free_pgs);
+ mdb_midl_free(txn->mt_spill_pgs);
free(txn->mt_u.dirty_list);
return;
}
@@ -2159,25 +2501,32 @@ mdb_freelist_save(MDB_txn *txn)
total_room += head_room;
}
- /* Fill in the reserved, touched me_pghead records. Avoid write ops
- * so they cannot rearrange anything, just read the destinations.
- */
+ /* Fill in the reserved, touched me_pghead records */
rc = MDB_SUCCESS;
if (mop_len) {
MDB_val key, data;
- mop += mop_len + 1;
+ mop += mop_len;
rc = mdb_cursor_first(&mc, &key, &data);
for (; !rc; rc = mdb_cursor_next(&mc, &key, &data, MDB_NEXT)) {
- MDB_IDL dest = data.mv_data;
+ unsigned flags = MDB_CURRENT;
+ txnid_t id = *(txnid_t *)key.mv_data;
ssize_t len = (ssize_t)(data.mv_size / sizeof(MDB_ID)) - 1;
+ MDB_ID save;
- assert(len >= 0 && *(txnid_t*)key.mv_data <= env->me_pglast);
- if (len > mop_len)
+ assert(len >= 0 && id <= env->me_pglast);
+ key.mv_data = &id;
+ if (len > mop_len) {
len = mop_len;
- *dest++ = len;
- memcpy(dest, mop -= len, len * sizeof(MDB_ID));
- if (! (mop_len -= len))
+ data.mv_size = (len + 1) * sizeof(MDB_ID);
+ flags = 0;
+ }
+ data.mv_data = mop -= len;
+ save = mop[0];
+ mop[0] = len;
+ rc = mdb_cursor_put(&mc, &key, &data, flags);
+ mop[0] = save;
+ if (rc || !(mop_len -= len))
break;
}
}
@@ -2191,7 +2540,7 @@ mdb_page_flush(MDB_txn *txn)
{
MDB_env *env = txn->mt_env;
MDB_ID2L dl = txn->mt_u.dirty_list;
- unsigned psize = env->me_psize;
+ unsigned psize = env->me_psize, j;
int i, pagecount = dl[0].mid, rc;
size_t size = 0, pos = 0;
pgno_t pgno = 0;
@@ -2205,13 +2554,20 @@ mdb_page_flush(MDB_txn *txn)
int n = 0;
#endif
+ j = 0;
if (env->me_flags & MDB_WRITEMAP) {
/* Clear dirty flags */
for (i = pagecount; i; i--) {
dp = dl[i].mptr;
+ /* Don't flush this page yet */
+ if (dp->mp_flags & P_KEEP) {
+ dp->mp_flags ^= P_KEEP;
+ dl[++j] = dl[i];
+ continue;
+ }
dp->mp_flags &= ~P_DIRTY;
}
- dl[0].mid = 0;
+ dl[0].mid = j;
return MDB_SUCCESS;
}
@@ -2219,6 +2575,12 @@ mdb_page_flush(MDB_txn *txn)
for (i = 1;; i++) {
if (i <= pagecount) {
dp = dl[i].mptr;
+ /* Don't flush this page yet */
+ if (dp->mp_flags & P_KEEP) {
+ dp->mp_flags ^= P_KEEP;
+ dl[i].mid = 0;
+ continue;
+ }
pgno = dl[i].mid;
/* clear dirty flag */
dp->mp_flags &= ~P_DIRTY;
@@ -2290,7 +2652,18 @@ mdb_page_flush(MDB_txn *txn)
#endif /* _WIN32 */
}
- mdb_dlist_free(txn);
+ j = 0;
+ for (i=1; i<=pagecount; i++) {
+ dp = dl[i].mptr;
+ /* This is a page we skipped above */
+ if (!dl[i].mid) {
+ dl[++j] = dl[i];
+ dl[j].mid = dp->mp_pgno;
+ continue;
+ }
+ mdb_dpage_free(env, dp);
+ }
+ dl[0].mid = j;
return MDB_SUCCESS;
}
@@ -2348,17 +2721,48 @@ mdb_txn_commit(MDB_txn *txn)
/* Update parent's DB table. */
memcpy(parent->mt_dbs, txn->mt_dbs, txn->mt_numdbs * sizeof(MDB_db));
- txn->mt_parent->mt_numdbs = txn->mt_numdbs;
- txn->mt_parent->mt_dbflags[0] = txn->mt_dbflags[0];
- txn->mt_parent->mt_dbflags[1] = txn->mt_dbflags[1];
+ parent->mt_numdbs = txn->mt_numdbs;
+ parent->mt_dbflags[0] = txn->mt_dbflags[0];
+ parent->mt_dbflags[1] = txn->mt_dbflags[1];
for (i=2; imt_numdbs; i++) {
/* preserve parent's DB_NEW status */
- x = txn->mt_parent->mt_dbflags[i] & DB_NEW;
- txn->mt_parent->mt_dbflags[i] = txn->mt_dbflags[i] | x;
+ x = parent->mt_dbflags[i] & DB_NEW;
+ parent->mt_dbflags[i] = txn->mt_dbflags[i] | x;
}
- dst = txn->mt_parent->mt_u.dirty_list;
+ dst = parent->mt_u.dirty_list;
src = txn->mt_u.dirty_list;
+ /* Remove anything in our dirty list from parent's spill list */
+ if (parent->mt_spill_pgs) {
+ x = parent->mt_spill_pgs[0];
+ len = x;
+ /* zero out our dirty pages in parent spill list */
+ for (i=1; i<=src[0].mid; i++) {
+ if (src[i].mid < parent->mt_spill_pgs[x])
+ continue;
+ if (src[i].mid > parent->mt_spill_pgs[x]) {
+ if (x <= 1)
+ break;
+ x--;
+ continue;
+ }
+ parent->mt_spill_pgs[x] = 0;
+ len--;
+ }
+ /* OK, we had a few hits, squash zeros from the spill list */
+ if (len < parent->mt_spill_pgs[0]) {
+ x=1;
+ for (y=1; y<=parent->mt_spill_pgs[0]; y++) {
+ if (parent->mt_spill_pgs[y]) {
+ if (y != x) {
+ parent->mt_spill_pgs[x] = parent->mt_spill_pgs[y];
+ }
+ x++;
+ }
+ }
+ parent->mt_spill_pgs[0] = len;
+ }
+ }
/* Find len = length of merging our dirty list with parent's */
x = dst[0].mid;
dst[0].mid = 0; /* simplify loops */
@@ -2390,8 +2794,17 @@ mdb_txn_commit(MDB_txn *txn)
dst[0].mid = len;
free(txn->mt_u.dirty_list);
parent->mt_dirty_room = txn->mt_dirty_room;
+ if (txn->mt_spill_pgs) {
+ if (parent->mt_spill_pgs) {
+ mdb_midl_append_list(&parent->mt_spill_pgs, txn->mt_spill_pgs);
+ mdb_midl_free(txn->mt_spill_pgs);
+ mdb_midl_sort(parent->mt_spill_pgs);
+ } else {
+ parent->mt_spill_pgs = txn->mt_spill_pgs;
+ }
+ }
- txn->mt_parent->mt_child = NULL;
+ parent->mt_child = NULL;
mdb_midl_free(((MDB_ntxn *)txn)->mnt_pgstate.mf_pghead);
free(txn);
return MDB_SUCCESS;
@@ -2487,6 +2900,8 @@ mdb_env_read_header(MDB_env *env, MDB_meta *meta)
memset(&ov, 0, sizeof(ov));
ov.Offset = off;
rc = ReadFile(env->me_fd,&pbuf,MDB_PAGESIZE,&len,&ov) ? (int)len : -1;
+ if (rc == -1 && ErrCode() == ERROR_HANDLE_EOF)
+ rc = 0;
#else
rc = pread(env->me_fd, &pbuf, MDB_PAGESIZE, off);
#endif
@@ -2511,9 +2926,9 @@ mdb_env_read_header(MDB_env *env, MDB_meta *meta)
return MDB_INVALID;
}
- if (m->mm_version != MDB_VERSION) {
+ if (m->mm_version != MDB_DATA_VERSION) {
DPRINTF("database is version %u, expected version %u",
- m->mm_version, MDB_VERSION);
+ m->mm_version, MDB_DATA_VERSION);
return MDB_VERSION_MISMATCH;
}
@@ -2540,7 +2955,7 @@ mdb_env_init_meta(MDB_env *env, MDB_meta *meta)
GET_PAGESIZE(psize);
meta->mm_magic = MDB_MAGIC;
- meta->mm_version = MDB_VERSION;
+ meta->mm_version = MDB_DATA_VERSION;
meta->mm_mapsize = env->me_mapsize;
meta->mm_psize = psize;
meta->mm_last_pg = 1;
@@ -2808,6 +3223,14 @@ mdb_env_open2(MDB_env *env)
LONG sizelo, sizehi;
sizelo = env->me_mapsize & 0xffffffff;
sizehi = env->me_mapsize >> 16 >> 16; /* only needed on Win64 */
+
+ /* See if we should use QueryLimited */
+ rc = GetVersion();
+ if ((rc & 0xff) > 5)
+ env->me_pidquery = PROCESS_QUERY_LIMITED_INFORMATION;
+ else
+ env->me_pidquery = PROCESS_QUERY_INFORMATION;
+
/* Windows won't create mappings for zero length files.
* Just allocate the maxsize right now.
*/
@@ -3301,7 +3724,7 @@ mdb_env_setup_locks(MDB_env *env, char *lpath, int mode, int *excl)
pthread_mutexattr_destroy(&mattr);
#endif /* _WIN32 || MDB_USE_POSIX_SEM */
- env->me_txns->mti_version = MDB_VERSION;
+ env->me_txns->mti_version = MDB_LOCK_VERSION;
env->me_txns->mti_magic = MDB_MAGIC;
env->me_txns->mti_txnid = 0;
env->me_txns->mti_numreaders = 0;
@@ -3312,9 +3735,9 @@ mdb_env_setup_locks(MDB_env *env, char *lpath, int mode, int *excl)
rc = MDB_INVALID;
goto fail;
}
- if (env->me_txns->mti_version != MDB_VERSION) {
+ if (env->me_txns->mti_version != MDB_LOCK_VERSION) {
DPRINTF("lock region is version %u, expected version %u",
- env->me_txns->mti_version, MDB_VERSION);
+ env->me_txns->mti_version, MDB_LOCK_VERSION);
rc = MDB_VERSION_MISMATCH;
goto fail;
}
@@ -3970,6 +4393,19 @@ mdb_page_get(MDB_txn *txn, pgno_t pgno, MDB_page **ret, int *lvl)
level = 1;
do {
MDB_ID2L dl = tx2->mt_u.dirty_list;
+ unsigned x;
+ /* Spilled pages were dirtied in this txn and flushed
+ * because the dirty list got full. Bring this page
+ * back in from the map (but don't unspill it here,
+ * leave that unless page_touch happens again).
+ */
+ if (tx2->mt_spill_pgs) {
+ x = mdb_midl_search(tx2->mt_spill_pgs, pgno);
+ if (x <= tx2->mt_spill_pgs[0] && tx2->mt_spill_pgs[x] == pgno) {
+ p = (MDB_page *)(txn->mt_env->me_map + txn->mt_env->me_psize * pgno);
+ goto done;
+ }
+ }
if (dl[0].mid) {
unsigned x = mdb_mid2l_search(dl, pgno);
if (x <= dl[0].mid && dl[x].mid == pgno) {
@@ -4070,6 +4506,8 @@ mdb_page_search_root(MDB_cursor *mc, MDB_val *key, int modify)
DPRINTF("found leaf page %zu for key [%s]", mp->mp_pgno,
key ? DKEY(key) : NULL);
+ mc->mc_flags |= C_INITIALIZED;
+ mc->mc_flags &= ~C_EOF;
return MDB_SUCCESS;
}
@@ -4197,11 +4635,21 @@ mdb_ovpage_free(MDB_cursor *mc, MDB_page *mp)
int rc;
DPRINTF("free ov page %zu (%d)", pg, ovpages);
- /* If the page is dirty we just acquired it, so we should
- * give it back to our current free list, if any.
+ /* If the page is dirty or on the spill list we just acquired it,
+ * so we should give it back to our current free list, if any.
* Not currently supported in nested txns.
* Otherwise put it onto the list of pages we freed in this txn.
*/
+ if (!(mp->mp_flags & P_DIRTY) && txn->mt_spill_pgs) {
+ unsigned x = mdb_midl_search(txn->mt_spill_pgs, pg);
+ if (x <= txn->mt_spill_pgs[0] && txn->mt_spill_pgs[x] == pg) {
+ /* This page is no longer spilled */
+ for (; x < txn->mt_spill_pgs[0]; x++)
+ txn->mt_spill_pgs[x] = txn->mt_spill_pgs[x+1];
+ txn->mt_spill_pgs[0]--;
+ goto release;
+ }
+ }
if ((mp->mp_flags & P_DIRTY) && !txn->mt_parent && env->me_pghead) {
unsigned j, x;
pgno_t *mop;
@@ -4227,6 +4675,7 @@ mdb_ovpage_free(MDB_cursor *mc, MDB_page *mp)
}
if (!(env->me_flags & MDB_WRITEMAP))
mdb_dpage_free(env, mp);
+release:
/* Insert in me_pghead */
mop = env->me_pghead;
j = mop[0] + ovpages;
@@ -4514,6 +4963,9 @@ mdb_cursor_set(MDB_cursor *mc, MDB_val *key, MDB_val *data,
assert(key);
assert(key->mv_size > 0);
+ if (mc->mc_xcursor)
+ mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF);
+
/* See if we're already on the right page */
if (mc->mc_flags & C_INITIALIZED) {
MDB_val nodekey;
@@ -4686,6 +5138,9 @@ mdb_cursor_first(MDB_cursor *mc, MDB_val *key, MDB_val *data)
int rc;
MDB_node *leaf;
+ if (mc->mc_xcursor)
+ mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF);
+
if (!(mc->mc_flags & C_INITIALIZED) || mc->mc_top) {
rc = mdb_page_search(mc, NULL, 0);
if (rc != MDB_SUCCESS)
@@ -4712,8 +5167,6 @@ mdb_cursor_first(MDB_cursor *mc, MDB_val *key, MDB_val *data)
if (rc)
return rc;
} else {
- if (mc->mc_xcursor)
- mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF);
if ((rc = mdb_node_read(mc->mc_txn, leaf, data)) != MDB_SUCCESS)
return rc;
}
@@ -4729,6 +5182,9 @@ mdb_cursor_last(MDB_cursor *mc, MDB_val *key, MDB_val *data)
int rc;
MDB_node *leaf;
+ if (mc->mc_xcursor)
+ mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF);
+
if (!(mc->mc_flags & C_EOF)) {
if (!(mc->mc_flags & C_INITIALIZED) || mc->mc_top) {
@@ -4760,8 +5216,6 @@ mdb_cursor_last(MDB_cursor *mc, MDB_val *key, MDB_val *data)
if (rc)
return rc;
} else {
- if (mc->mc_xcursor)
- mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF);
if ((rc = mdb_node_read(mc->mc_txn, leaf, data)) != MDB_SUCCESS)
return rc;
}
@@ -4943,16 +5397,20 @@ mdb_cursor_touch(MDB_cursor *mc)
return MDB_SUCCESS;
}
+/** Do not spill pages to disk if txn is getting full, may fail instead */
+#define MDB_NOSPILL 0x8000
+
int
mdb_cursor_put(MDB_cursor *mc, MDB_val *key, MDB_val *data,
unsigned int flags)
{
+ enum { MDB_NO_ROOT = MDB_LAST_ERRCODE+10 }; /* internal code */
MDB_node *leaf = NULL;
MDB_val xdata, *rdata, dkey;
MDB_page *fp;
MDB_db dummy;
int do_sub = 0, insert = 0;
- unsigned int mcount = 0, dcount = 0;
+ unsigned int mcount = 0, dcount = 0, nospill;
size_t nsize;
int rc, rc2;
MDB_pagebuf pbuf;
@@ -4970,6 +5428,9 @@ mdb_cursor_put(MDB_cursor *mc, MDB_val *key, MDB_val *data,
return EINVAL;
}
+ nospill = flags & MDB_NOSPILL;
+ flags &= ~MDB_NOSPILL;
+
if (F_ISSET(mc->mc_txn->mt_flags, MDB_TXN_RDONLY))
return EACCES;
@@ -4994,23 +5455,10 @@ mdb_cursor_put(MDB_cursor *mc, MDB_val *key, MDB_val *data,
return EINVAL;
rc = MDB_SUCCESS;
} else if (mc->mc_db->md_root == P_INVALID) {
- MDB_page *np;
- /* new database, write a root leaf page */
- DPUTS("allocating new root leaf page");
- if ((rc = mdb_page_new(mc, P_LEAF, 1, &np))) {
- return rc;
- }
+ /* new database, cursor has nothing to point to */
mc->mc_snum = 0;
- mdb_cursor_push(mc, np);
- mc->mc_db->md_root = np->mp_pgno;
- mc->mc_db->md_depth++;
- *mc->mc_dbflag |= DB_DIRTY;
- if ((mc->mc_db->md_flags & (MDB_DUPSORT|MDB_DUPFIXED))
- == MDB_DUPFIXED)
- np->mp_flags |= P_LEAF2;
- mc->mc_flags |= C_INITIALIZED;
- rc = MDB_NOTFOUND;
- goto top;
+ mc->mc_flags &= ~C_INITIALIZED;
+ rc = MDB_NO_ROOT;
} else {
int exact = 0;
MDB_val d2;
@@ -5028,7 +5476,7 @@ mdb_cursor_put(MDB_cursor *mc, MDB_val *key, MDB_val *data,
}
}
} else {
- rc = mdb_cursor_set(mc, key, &d2, MDB_SET, &exact);
+ rc = mdb_cursor_set(mc, key, &d2, MDB_SET, &exact);
}
if ((flags & MDB_NOOVERWRITE) && rc == 0) {
DPRINTF("duplicate key [%s]", DKEY(key));
@@ -5039,12 +5487,40 @@ mdb_cursor_put(MDB_cursor *mc, MDB_val *key, MDB_val *data,
return rc;
}
- /* Cursor is positioned, now make sure all pages are writable */
- rc2 = mdb_cursor_touch(mc);
- if (rc2)
- return rc2;
+ /* Cursor is positioned, check for room in the dirty list */
+ if (!nospill) {
+ if (flags & MDB_MULTIPLE) {
+ rdata = &xdata;
+ xdata.mv_size = data->mv_size * dcount;
+ } else {
+ rdata = data;
+ }
+ if ((rc2 = mdb_page_spill(mc, key, rdata)))
+ return rc2;
+ }
+
+ if (rc == MDB_NO_ROOT) {
+ MDB_page *np;
+ /* new database, write a root leaf page */
+ DPUTS("allocating new root leaf page");
+ if ((rc2 = mdb_page_new(mc, P_LEAF, 1, &np))) {
+ return rc2;
+ }
+ mdb_cursor_push(mc, np);
+ mc->mc_db->md_root = np->mp_pgno;
+ mc->mc_db->md_depth++;
+ *mc->mc_dbflag |= DB_DIRTY;
+ if ((mc->mc_db->md_flags & (MDB_DUPSORT|MDB_DUPFIXED))
+ == MDB_DUPFIXED)
+ np->mp_flags |= P_LEAF2;
+ mc->mc_flags |= C_INITIALIZED;
+ } else {
+ /* make sure all cursor pages are writable */
+ rc2 = mdb_cursor_touch(mc);
+ if (rc2)
+ return rc2;
+ }
-top:
/* The key already exists */
if (rc == MDB_SUCCESS) {
/* there's only a key anyway, so this is a no-op */
@@ -5200,8 +5676,18 @@ current:
return rc2;
ovpages = omp->mp_pages;
- /* Is the ov page writable and large enough? */
- if ((omp->mp_flags & P_DIRTY) && ovpages >= dpages) {
+ /* Is the ov page large enough? */
+ if (ovpages >= dpages) {
+ if (!(omp->mp_flags & P_DIRTY) &&
+ (level || (mc->mc_txn->mt_env->me_flags & MDB_WRITEMAP)))
+ {
+ rc = mdb_page_unspill(mc->mc_txn, omp, &omp);
+ if (rc)
+ return rc;
+ level = 0; /* dirty in this txn or clean */
+ }
+ /* Is it dirty? */
+ if (omp->mp_flags & P_DIRTY) {
/* yes, overwrite it. Note in this case we don't
* bother to try shrinking the page if the new data
* is smaller than the overflow threshold.
@@ -5234,10 +5720,10 @@ current:
else
memcpy(METADATA(omp), data->mv_data, data->mv_size);
goto done;
- } else {
- if ((rc2 = mdb_ovpage_free(mc, omp)) != MDB_SUCCESS)
- return rc2;
+ }
}
+ if ((rc2 = mdb_ovpage_free(mc, omp)) != MDB_SUCCESS)
+ return rc2;
} else if (NODEDSZ(leaf) == data->mv_size) {
/* same size, just replace it. Note that we could
* also reuse this node if the new data is smaller,
@@ -5310,10 +5796,11 @@ put_sub:
xdata.mv_data = "";
leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
if (flags & MDB_CURRENT) {
- xflags = MDB_CURRENT;
+ xflags = MDB_CURRENT|MDB_NOSPILL;
} else {
mdb_xcursor_init1(mc, leaf);
- xflags = (flags & MDB_NODUPDATA) ? MDB_NOOVERWRITE : 0;
+ xflags = (flags & MDB_NODUPDATA) ?
+ MDB_NOOVERWRITE|MDB_NOSPILL : MDB_NOSPILL;
}
/* converted, write the original data first */
if (dkey.mv_size) {
@@ -5328,6 +5815,7 @@ put_sub:
for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2=m2->mc_next) {
if (m2 == mc || m2->mc_snum < mc->mc_snum) continue;
+ if (!(m2->mc_flags & C_INITIALIZED)) continue;
if (m2->mc_pg[i] == mp && m2->mc_ki[i] == mc->mc_ki[i]) {
mdb_xcursor_init1(m2, leaf);
}
@@ -5383,6 +5871,10 @@ mdb_cursor_del(MDB_cursor *mc, unsigned int flags)
if (!(mc->mc_flags & C_INITIALIZED))
return EINVAL;
+ if (!(flags & MDB_NOSPILL) && (rc = mdb_page_spill(mc, NULL, NULL)))
+ return rc;
+ flags &= ~MDB_NOSPILL; /* TODO: Or change (flags != MDB_NODUPDATA) to ~(flags & MDB_NODUPDATA), not looking at the logic of that code just now */
+
rc = mdb_cursor_touch(mc);
if (rc)
return rc;
@@ -5394,7 +5886,7 @@ mdb_cursor_del(MDB_cursor *mc, unsigned int flags)
if (!F_ISSET(leaf->mn_flags, F_SUBDATA)) {
mc->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(leaf);
}
- rc = mdb_cursor_del(&mc->mc_xcursor->mx_cursor, 0);
+ rc = mdb_cursor_del(&mc->mc_xcursor->mx_cursor, MDB_NOSPILL);
/* If sub-DB still has entries, we're done */
if (mc->mc_xcursor->mx_db.md_entries) {
if (leaf->mn_flags & F_SUBDATA) {
@@ -5852,6 +6344,7 @@ mdb_xcursor_init1(MDB_cursor *mc, MDB_node *node)
static void
mdb_cursor_init(MDB_cursor *mc, MDB_txn *txn, MDB_dbi dbi, MDB_xcursor *mx)
{
+ mc->mc_next = NULL;
mc->mc_backup = NULL;
mc->mc_dbi = dbi;
mc->mc_txn = txn;
@@ -6641,6 +7134,7 @@ mdb_del(MDB_txn *txn, MDB_dbi dbi,
* run out of space, triggering a split. We need this
* cursor to be consistent until the end of the rebalance.
*/
+ mc.mc_flags |= C_UNTRACK;
mc.mc_next = txn->mt_cursors[dbi];
txn->mt_cursors[dbi] = &mc;
rc = mdb_cursor_del(&mc, data ? 0 : MDB_NODUPDATA);
@@ -7025,7 +7519,7 @@ done:
m3 = m2;
if (m3 == mc)
continue;
- if (!(m3->mc_flags & C_INITIALIZED))
+ if (!(m2->mc_flags & m3->mc_flags & C_INITIALIZED))
continue;
if (m3->mc_flags & C_SPLITTING)
continue;
@@ -7321,6 +7815,15 @@ void mdb_dbi_close(MDB_env *env, MDB_dbi dbi)
free(ptr);
}
+int mdb_dbi_flags(MDB_env *env, MDB_dbi dbi, unsigned int *flags)
+{
+ /* We could return the flags for the FREE_DBI too but what's the point? */
+ if (dbi <= MAIN_DBI || dbi >= env->me_numdbs)
+ return EINVAL;
+ *flags = env->me_dbflags[dbi];
+ return MDB_SUCCESS;
+}
+
/** Add all the DB's pages to the free list.
* @param[in] mc Cursor on the DB to free.
* @param[in] subs non-Zero to check for sub-DBs in this DB.
@@ -7485,4 +7988,125 @@ int mdb_set_relctx(MDB_txn *txn, MDB_dbi dbi, void *ctx)
return MDB_SUCCESS;
}
+int mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx)
+{
+ unsigned int i, rdrs;
+ MDB_reader *mr;
+ char buf[64];
+ int first = 1;
+
+ if (!env || !func)
+ return -1;
+ if (!env->me_txns) {
+ return func("(no reader locks)\n", ctx);
+ }
+ rdrs = env->me_txns->mti_numreaders;
+ mr = env->me_txns->mti_readers;
+ for (i=0; i> 1;
+ cursor = base + pivot + 1;
+ val = pid - ids[cursor];
+
+ if( val < 0 ) {
+ n = pivot;
+
+ } else if ( val > 0 ) {
+ base = cursor;
+ n -= pivot + 1;
+
+ } else {
+ /* found, so it's a duplicate */
+ return -1;
+ }
+ }
+
+ if( val > 0 ) {
+ ++cursor;
+ }
+ ids[0]++;
+ for (n = ids[0]; n > cursor; n--)
+ ids[n] = ids[n-1];
+ ids[n] = pid;
+ return 0;
+}
+
+int mdb_reader_check(MDB_env *env, int *dead)
+{
+ unsigned int i, j, rdrs;
+ MDB_reader *mr;
+ pid_t *pids, pid;
+ int count = 0;
+
+ if (!env)
+ return EINVAL;
+ if (dead)
+ *dead = 0;
+ if (!env->me_txns)
+ return MDB_SUCCESS;
+ rdrs = env->me_txns->mti_numreaders;
+ pids = malloc((rdrs+1) * sizeof(pid_t));
+ if (!pids)
+ return ENOMEM;
+ pids[0] = 0;
+ mr = env->me_txns->mti_readers;
+ j = 0;
+ for (i=0; ime_pid) {
+ pid = mr[i].mr_pid;
+ if (mdb_pid_insert(pids, pid) == 0) {
+ if (mdb_reader_pid(env, Pidcheck, pid)) {
+ LOCK_MUTEX_R(env);
+ if (mdb_reader_pid(env, Pidcheck, pid)) {
+ for (j=i; j 1) {
+ int dead;
+ mdb_reader_check(env, &dead);
+ printf(" %d stale readers cleared.\n", dead);
+ rc = mdb_reader_list(env, (MDB_msg_func *)fputs, stdout);
+ }
+ if (!(subname || alldbs || freinfo))
+ goto env_close;
+ }
+
+ rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn);
+ if (rc) {
+ printf("mdb_txn_begin failed, error %d %s\n", rc, mdb_strerror(rc));
+ goto env_close;
+ }
+
if (freinfo) {
MDB_cursor *cursor;
MDB_val key, data;
diff --git a/libraries/liblmdb/midl.c b/Subtrees/mdb/libraries/liblmdb/midl.c
similarity index 99%
rename from libraries/liblmdb/midl.c
rename to Subtrees/mdb/libraries/liblmdb/midl.c
index e7bd680cb0..86e4592d2d 100644
--- a/libraries/liblmdb/midl.c
+++ b/Subtrees/mdb/libraries/liblmdb/midl.c
@@ -31,8 +31,7 @@
*/
#define CMP(x,y) ( (x) < (y) ? -1 : (x) > (y) )
-#if 0 /* superseded by append/sort */
-static unsigned mdb_midl_search( MDB_IDL ids, MDB_ID id )
+unsigned mdb_midl_search( MDB_IDL ids, MDB_ID id )
{
/*
* binary search of id in ids
@@ -67,6 +66,7 @@ static unsigned mdb_midl_search( MDB_IDL ids, MDB_ID id )
return cursor;
}
+#if 0 /* superseded by append/sort */
int mdb_midl_insert( MDB_IDL ids, MDB_ID id )
{
unsigned x, i;
diff --git a/libraries/liblmdb/midl.h b/Subtrees/mdb/libraries/liblmdb/midl.h
similarity index 94%
rename from libraries/liblmdb/midl.h
rename to Subtrees/mdb/libraries/liblmdb/midl.h
index 9ce7133c6e..b0bdff3f49 100644
--- a/libraries/liblmdb/midl.h
+++ b/Subtrees/mdb/libraries/liblmdb/midl.h
@@ -74,14 +74,12 @@ typedef MDB_ID *MDB_IDL;
xidl[xlen] = (id); \
} while (0)
-#if 0 /* superseded by append/sort */
- /** Insert an ID into an IDL.
- * @param[in,out] ids The IDL to insert into.
- * @param[in] id The ID to insert.
- * @return 0 on success, -1 if ID was already present, -2 on error.
+ /** Search for an ID in an IDL.
+ * @param[in] ids The IDL to search.
+ * @param[in] id The ID to search for.
+ * @return The index of the first ID greater than or equal to \b id.
*/
-int mdb_midl_insert( MDB_IDL ids, MDB_ID id );
-#endif
+unsigned mdb_midl_search( MDB_IDL ids, MDB_ID id );
/** Allocate an IDL.
* Allocates memory for an IDL of the given size.
diff --git a/libraries/liblmdb/mtest.c b/Subtrees/mdb/libraries/liblmdb/mtest.c
similarity index 100%
rename from libraries/liblmdb/mtest.c
rename to Subtrees/mdb/libraries/liblmdb/mtest.c
diff --git a/libraries/liblmdb/mtest2.c b/Subtrees/mdb/libraries/liblmdb/mtest2.c
similarity index 100%
rename from libraries/liblmdb/mtest2.c
rename to Subtrees/mdb/libraries/liblmdb/mtest2.c
diff --git a/libraries/liblmdb/mtest3.c b/Subtrees/mdb/libraries/liblmdb/mtest3.c
similarity index 100%
rename from libraries/liblmdb/mtest3.c
rename to Subtrees/mdb/libraries/liblmdb/mtest3.c
diff --git a/libraries/liblmdb/mtest4.c b/Subtrees/mdb/libraries/liblmdb/mtest4.c
similarity index 100%
rename from libraries/liblmdb/mtest4.c
rename to Subtrees/mdb/libraries/liblmdb/mtest4.c
diff --git a/libraries/liblmdb/mtest5.c b/Subtrees/mdb/libraries/liblmdb/mtest5.c
similarity index 100%
rename from libraries/liblmdb/mtest5.c
rename to Subtrees/mdb/libraries/liblmdb/mtest5.c
diff --git a/libraries/liblmdb/mtest6.c b/Subtrees/mdb/libraries/liblmdb/mtest6.c
similarity index 100%
rename from libraries/liblmdb/mtest6.c
rename to Subtrees/mdb/libraries/liblmdb/mtest6.c
diff --git a/libraries/liblmdb/sample-bdb.c b/Subtrees/mdb/libraries/liblmdb/sample-bdb.c
similarity index 100%
rename from libraries/liblmdb/sample-bdb.c
rename to Subtrees/mdb/libraries/liblmdb/sample-bdb.c
diff --git a/libraries/liblmdb/sample-mdb.c b/Subtrees/mdb/libraries/liblmdb/sample-mdb.c
similarity index 100%
rename from libraries/liblmdb/sample-mdb.c
rename to Subtrees/mdb/libraries/liblmdb/sample-mdb.c
diff --git a/TODO.txt b/TODO.txt
index 908475b5f3..69e5eda3da 100644
--- a/TODO.txt
+++ b/TODO.txt
@@ -2,16 +2,40 @@
RIPPLE TODO
--------------------------------------------------------------------------------
-- Examples for different backend key/value config settings
+Items marked '*' can be handled by third parties.
-- Unit Test attention
-- NodeStore backend unit test
-
-- Validations unit test
+Vinnie's Short List (Changes day to day)
+- Make theConfig a SharedSingleton to prevent leak warnings
+- Add fast backend to the unit test
+- Refactor Section code into ConfigFile
+- Change NodeStore config file format to multiline key/value pairs
+- Improved Mutex to track deadlocks
+- Memory NodeStore::Backend for unit tests [*]
+- Finish unit tests and code for Validators
+- Import beast::db and use it in SQliteBackend
+- Convert some Ripple boost unit tests to Beast. [*]
+- Move all code into modules/
+- Work on KeyvaDB
+[*] These can be handled by external developers
--------------------------------------------------------------------------------
+- Raise the warning level and fix everything
+
+* Restyle all the macros in ripple_ConfigSection.h
+
+* Replace all throw with beast::Throw
+ Only in the ripple sources, not in Subtrees/ or protobuf or websocket
+
+- Replace base_uint and uintXXX with UnsignedInteger
+ * Need to specialize UnsignedInteger to work efficiently with 4 and 8 byte
+ multiples of the size.
+
+- Rewrite boost program_options in Beast
+
+- Validations unit test
+
- Replace endian conversion calls with beast calls:
htobe32, be32toh, ntohl, etc...
Start by removing the system headers which provide these routines, if possible
@@ -118,8 +142,6 @@ RIPPLE TODO
- Make LevelDB and Ripple code work with both Unicode and non-Unicode Windows APIs
-- Raise the warning level and fix everything
-
- Go searching through VFALCO notes and fix everything
- Deal with function-level statics used for SqliteDatabase (like in
diff --git a/modules/ripple_app/data/ripple_DBInit.cpp b/modules/ripple_app/data/ripple_DBInit.cpp
index 8639fe035b..a2d6ba1a7d 100644
--- a/modules/ripple_app/data/ripple_DBInit.cpp
+++ b/modules/ripple_app/data/ripple_DBInit.cpp
@@ -283,32 +283,15 @@ const char* WalletDBInit[] =
int WalletDBCount = NUMBER (WalletDBInit);
// Hash node database holds nodes indexed by hash
-const char* HashNodeDBInit[] =
-{
- "PRAGMA synchronous=NORMAL;",
- "PRAGMA journal_mode=WAL;",
- "PRAGMA journal_size_limit=1582080;",
-
-#if (ULONG_MAX > UINT_MAX) && !defined (NO_SQLITE_MMAP)
- "PRAGMA mmap_size=171798691840;",
-#endif
-
- "BEGIN TRANSACTION;",
-
- "CREATE TABLE CommittedObjects ( \
- Hash CHARACTER(64) PRIMARY KEY, \
- ObjType CHAR(1) NOT NULL, \
- LedgerIndex BIGINT UNSIGNED, \
- Object BLOB \
- );",
-
- "END TRANSACTION;"
-};
+// VFALCO TODO Remove this since it looks unused
+/*
int HashNodeDBCount = NUMBER (HashNodeDBInit);
+*/
// Net node database holds nodes seen on the network
// XXX Not really used needs replacement.
+/*
const char* NetNodeDBInit[] =
{
"CREATE TABLE KnownNodes ( \
@@ -320,7 +303,10 @@ const char* NetNodeDBInit[] =
};
int NetNodeDBCount = NUMBER (NetNodeDBInit);
+*/
+// This appears to be unused
+/*
const char* PathFindDBInit[] =
{
"PRAGMA synchronous = OFF; ",
@@ -353,5 +339,5 @@ const char* PathFindDBInit[] =
};
int PathFindDBCount = NUMBER (PathFindDBInit);
+*/
-// vim:ts=4
diff --git a/modules/ripple_app/data/ripple_DBInit.h b/modules/ripple_app/data/ripple_DBInit.h
index d6111f9612..489b511588 100644
--- a/modules/ripple_app/data/ripple_DBInit.h
+++ b/modules/ripple_app/data/ripple_DBInit.h
@@ -12,19 +12,11 @@ extern const char* RpcDBInit[];
extern const char* TxnDBInit[];
extern const char* LedgerDBInit[];
extern const char* WalletDBInit[];
-extern const char* HashNodeDBInit[];
// VFALCO TODO Figure out what these counts are for
extern int RpcDBCount;
extern int TxnDBCount;
extern int LedgerDBCount;
extern int WalletDBCount;
-extern int HashNodeDBCount;
-
-// VFALCO TODO Seems these two aren't used so delete EVERYTHING.
-extern const char* NetNodeDBInit[];
-extern const char* PathFindDBInit[];
-extern int NetNodeDBCount;
-extern int PathFindDBCount;
#endif
diff --git a/modules/ripple_app/ledger/Ledger.cpp b/modules/ripple_app/ledger/Ledger.cpp
index 54bbd48df9..3614fbbe07 100644
--- a/modules/ripple_app/ledger/Ledger.cpp
+++ b/modules/ripple_app/ledger/Ledger.cpp
@@ -529,10 +529,12 @@ void Ledger::saveAcceptedLedger (Job&, bool fromConsensus)
assert (getTransHash () == mTransactionMap->getHash ());
// Save the ledger header in the hashed object store
- Serializer s (128);
- s.add32 (HashPrefix::ledgerMaster);
- addRaw (s);
- getApp().getNodeStore ().store (hotLEDGER, mLedgerSeq, s.peekData (), mHash);
+ {
+ Serializer s (128);
+ s.add32 (HashPrefix::ledgerMaster);
+ addRaw (s);
+ getApp().getNodeStore ().store (hotLEDGER, mLedgerSeq, s.modData (), mHash);
+ }
AcceptedLedger::pointer aLedger = AcceptedLedger::makeAcceptedLedger (shared_from_this ());
diff --git a/modules/ripple_app/ledger/ripple_InboundLedger.cpp b/modules/ripple_app/ledger/ripple_InboundLedger.cpp
index 30e8b3e0b3..40ef7c216a 100644
--- a/modules/ripple_app/ledger/ripple_InboundLedger.cpp
+++ b/modules/ripple_app/ledger/ripple_InboundLedger.cpp
@@ -48,7 +48,7 @@ bool InboundLedger::tryLocal ()
if (!mHaveBase)
{
// Nothing we can do without the ledger base
- NodeObject::pointer node = getApp().getNodeStore ().retrieve (mHash);
+ NodeObject::pointer node = getApp().getNodeStore ().fetch (mHash);
if (!node)
{
@@ -672,7 +672,7 @@ bool InboundLedger::takeBase (const std::string& data) // data must not have has
Serializer s (data.size () + 4);
s.add32 (HashPrefix::ledgerMaster);
s.addRaw (data);
- getApp().getNodeStore ().store (hotLEDGER, mLedger->getLedgerSeq (), s.peekData (), mHash);
+ getApp().getNodeStore ().store (hotLEDGER, mLedger->getLedgerSeq (), s.modData (), mHash);
progress ();
diff --git a/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.cpp b/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.cpp
index 12a3892378..860a53b4b5 100644
--- a/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.cpp
+++ b/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.cpp
@@ -6,118 +6,208 @@
#if RIPPLE_HYPERLEVELDB_AVAILABLE
-class HyperLevelDBBackendFactory::Backend : public NodeStore::Backend
+class HyperLevelDBBackendFactory::Backend
+ : public NodeStore::Backend
+ , public NodeStore::BatchWriter::Callback
+ , LeakChecked
{
public:
- Backend (StringPairArray const& keyValues)
- : mName(keyValues ["path"].toStdString ())
- , mDB(NULL)
+ typedef RecycledObjectPool StringPool;
+
+ Backend (size_t keyBytes,
+ StringPairArray const& keyValues,
+ NodeStore::Scheduler& scheduler)
+ : m_keyBytes (keyBytes)
+ , m_scheduler (scheduler)
+ , m_batch (*this, scheduler)
+ , m_name (keyValues ["path"].toStdString ())
{
- if (mName.empty())
- throw std::runtime_error ("Missing path in LevelDB backend");
+ if (m_name.empty ())
+ Throw (std::runtime_error ("Missing path in LevelDB backend"));
hyperleveldb::Options options;
options.create_if_missing = true;
- if (keyValues["cache_mb"].isEmpty())
+ if (keyValues ["cache_mb"].isEmpty ())
+ {
options.block_cache = hyperleveldb::NewLRUCache (theConfig.getSize (siHashNodeDBCache) * 1024 * 1024);
+ }
else
+ {
options.block_cache = hyperleveldb::NewLRUCache (keyValues["cache_mb"].getIntValue() * 1024L * 1024L);
+ }
- if (keyValues["filter_bits"].isEmpty())
+ if (keyValues ["filter_bits"].isEmpty())
{
if (theConfig.NODE_SIZE >= 2)
options.filter_policy = hyperleveldb::NewBloomFilterPolicy (10);
}
- else if (keyValues["filter_bits"].getIntValue() != 0)
- options.filter_policy = hyperleveldb::NewBloomFilterPolicy (keyValues["filter_bits"].getIntValue());
+ else if (keyValues ["filter_bits"].getIntValue() != 0)
+ {
+ options.filter_policy = hyperleveldb::NewBloomFilterPolicy (keyValues ["filter_bits"].getIntValue ());
+ }
- if (!keyValues["open_files"].isEmpty())
- options.max_open_files = keyValues["open_files"].getIntValue();
+ if (! keyValues["open_files"].isEmpty ())
+ {
+ options.max_open_files = keyValues ["open_files"].getIntValue();
+ }
- hyperleveldb::Status status = hyperleveldb::DB::Open (options, mName, &mDB);
- if (!status.ok () || !mDB)
- throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString()));
+ hyperleveldb::DB* db = nullptr;
+ hyperleveldb::Status status = hyperleveldb::DB::Open (options, m_name, &db);
+ if (!status.ok () || !db)
+ Throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString()));
+
+ m_db = db;
}
~Backend ()
{
- delete mDB;
}
- std::string getDataBaseName()
+ std::string getName()
{
- return mName;
+ return m_name;
}
- bool bulkStore (const std::vector< NodeObject::pointer >& objs)
- {
- hyperleveldb::WriteBatch batch;
+ //--------------------------------------------------------------------------
+
+ Status fetch (void const* key, NodeObject::Ptr* pObject)
+ {
+ pObject->reset ();
+
+ Status status (ok);
+
+ hyperleveldb::ReadOptions const options;
+ hyperleveldb::Slice const slice (static_cast (key), m_keyBytes);
- BOOST_FOREACH (NodeObject::ref obj, objs)
{
- Blob blob (toBlob (obj));
- batch.Put (
- hyperleveldb::Slice (reinterpret_cast(obj->getHash ().begin ()), 256 / 8),
- hyperleveldb::Slice (reinterpret_cast(&blob.front ()), blob.size ()));
+ // These are reused std::string objects,
+ // required for leveldb's funky interface.
+ //
+ StringPool::ScopedItem item (m_stringPool);
+ std::string& string = item.getObject ();
+
+ hyperleveldb::Status getStatus = m_db->Get (options, slice, &string);
+
+ if (getStatus.ok ())
+ {
+ NodeStore::DecodedBlob decoded (key, string.data (), string.size ());
+
+ if (decoded.wasOk ())
+ {
+ *pObject = decoded.createObject ();
+ }
+ else
+ {
+ // Decoding failed, probably corrupted!
+ //
+ status = dataCorrupt;
+ }
+ }
+ else
+ {
+ if (getStatus.IsCorruption ())
+ {
+ status = dataCorrupt;
+ }
+ else if (getStatus.IsNotFound ())
+ {
+ status = notFound;
+ }
+ else
+ {
+ status = unknown;
+ }
+ }
}
- return mDB->Write (hyperleveldb::WriteOptions (), &batch).ok ();
+
+ return status;
}
- NodeObject::pointer retrieve (uint256 const& hash)
+ void store (NodeObject::ref object)
{
- std::string sData;
- if (!mDB->Get (hyperleveldb::ReadOptions (),
- hyperleveldb::Slice (reinterpret_cast(hash.begin ()), 256 / 8), &sData).ok ())
+ m_batch.store (object);
+ }
+
+ void storeBatch (NodeStore::Batch const& batch)
+ {
+ hyperleveldb::WriteBatch wb;
+
{
- return NodeObject::pointer();
+ NodeStore::EncodedBlob::Pool::ScopedItem item (m_blobPool);
+
+ BOOST_FOREACH (NodeObject::ref object, batch)
+ {
+ item.getObject ().prepare (object);
+
+ wb.Put (
+ hyperleveldb::Slice (reinterpret_cast (
+ item.getObject ().getKey ()), m_keyBytes),
+ hyperleveldb::Slice (reinterpret_cast (
+ item.getObject ().getData ()), item.getObject ().getSize ()));
+ }
}
- return fromBinary(hash, &sData[0], sData.size ());
+
+ hyperleveldb::WriteOptions const options;
+
+ m_db->Write (options, &wb).ok ();
}
- void visitAll (FUNCTION_TYPE func)
+ void visitAll (VisitCallback& callback)
{
- hyperleveldb::Iterator* it = mDB->NewIterator (hyperleveldb::ReadOptions ());
+ hyperleveldb::ReadOptions const options;
+
+ ScopedPointer it (m_db->NewIterator (options));
+
for (it->SeekToFirst (); it->Valid (); it->Next ())
{
- if (it->key ().size () == 256 / 8)
+ if (it->key ().size () == m_keyBytes)
{
- uint256 hash;
- memcpy(hash.begin(), it->key ().data(), 256 / 8);
- func (fromBinary (hash, it->value ().data (), it->value ().size ()));
+ NodeStore::DecodedBlob decoded (it->key ().data (),
+ it->value ().data (),
+ it->value ().size ());
+
+ if (decoded.wasOk ())
+ {
+ NodeObject::Ptr object (decoded.createObject ());
+
+ callback.visitObject (object);
+ }
+ else
+ {
+ // Uh oh, corrupted data!
+ WriteLog (lsFATAL, NodeObject) << "Corrupt NodeObject #" << uint256 (it->key ().data ());
+ }
+ }
+ else
+ {
+ // VFALCO NOTE What does it mean to find an
+ // incorrectly sized key? Corruption?
+ WriteLog (lsFATAL, NodeObject) << "Bad key size = " << it->key ().size ();
}
}
}
- Blob toBlob(NodeObject::ref obj)
+ int getWriteLoad ()
{
- Blob rawData (9 + obj->getData ().size ());
- unsigned char* bufPtr = &rawData.front();
-
- *reinterpret_cast (bufPtr + 0) = ntohl (obj->getIndex ());
- *reinterpret_cast (bufPtr + 4) = ntohl (obj->getIndex ());
- * (bufPtr + 8) = static_cast (obj->getType ());
- memcpy (bufPtr + 9, &obj->getData ().front (), obj->getData ().size ());
-
- return rawData;
+ return m_batch.getWriteLoad ();
}
- NodeObject::pointer fromBinary(uint256 const& hash,
- char const* data, int size)
+ //--------------------------------------------------------------------------
+
+ void writeBatch (NodeStore::Batch const& batch)
{
- if (size < 9)
- throw std::runtime_error ("undersized object");
-
- uint32 index = htonl (*reinterpret_cast (data));
- int htype = data[8];
-
- return boost::make_shared (static_cast (htype), index,
- data + 9, size - 9, hash);
+ storeBatch (batch);
}
private:
- std::string mName;
- hyperleveldb::DB* mDB;
+ size_t const m_keyBytes;
+ NodeStore::Scheduler& m_scheduler;
+ NodeStore::BatchWriter m_batch;
+ StringPool m_stringPool;
+ NodeStore::EncodedBlob::Pool m_blobPool;
+ std::string m_name;
+ ScopedPointer m_db;
};
//------------------------------------------------------------------------------
@@ -142,9 +232,12 @@ String HyperLevelDBBackendFactory::getName () const
return "HyperLevelDB";
}
-NodeStore::Backend* HyperLevelDBBackendFactory::createInstance (StringPairArray const& keyValues)
+NodeStore::Backend* HyperLevelDBBackendFactory::createInstance (
+ size_t keyBytes,
+ StringPairArray const& keyValues,
+ NodeStore::Scheduler& scheduler)
{
- return new HyperLevelDBBackendFactory::Backend (keyValues);
+ return new HyperLevelDBBackendFactory::Backend (keyBytes, keyValues, scheduler);
}
//------------------------------------------------------------------------------
diff --git a/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.h b/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.h
index 1b44e4f9d1..43920477d8 100644
--- a/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.h
+++ b/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.h
@@ -23,7 +23,10 @@ public:
static HyperLevelDBBackendFactory& getInstance ();
String getName () const;
- NodeStore::Backend* createInstance (StringPairArray const& keyValues);
+
+ NodeStore::Backend* createInstance (size_t keyBytes,
+ StringPairArray const& keyValues,
+ NodeStore::Scheduler& scheduler);
};
#endif
diff --git a/modules/ripple_app/node/ripple_KeyvaDBBackendFactory.cpp b/modules/ripple_app/node/ripple_KeyvaDBBackendFactory.cpp
new file mode 100644
index 0000000000..8b08c87d41
--- /dev/null
+++ b/modules/ripple_app/node/ripple_KeyvaDBBackendFactory.cpp
@@ -0,0 +1,179 @@
+//------------------------------------------------------------------------------
+/*
+ Copyright (c) 2011-2013, OpenCoin, Inc.
+*/
+//==============================================================================
+
+class KeyvaDBBackendFactory::Backend : public NodeStore::Backend
+{
+private:
+ typedef RecycledObjectPool MemoryPool;
+ typedef RecycledObjectPool EncodedBlobPool;
+
+public:
+ Backend (size_t keyBytes,
+ StringPairArray const& keyValues,
+ NodeStore::Scheduler& scheduler)
+ : m_keyBytes (keyBytes)
+ , m_scheduler (scheduler)
+ , m_path (keyValues ["path"])
+ , m_db (KeyvaDB::New (
+ keyBytes,
+ 3,
+ File::getCurrentWorkingDirectory().getChildFile (m_path).withFileExtension ("key"),
+ File::getCurrentWorkingDirectory().getChildFile (m_path).withFileExtension ("val")))
+ {
+ }
+
+ ~Backend ()
+ {
+ }
+
+ std::string getName ()
+ {
+ return m_path.toStdString ();
+ }
+
+ //--------------------------------------------------------------------------
+
+ Status fetch (void const* key, NodeObject::Ptr* pObject)
+ {
+ pObject->reset ();
+
+ Status status (ok);
+
+ struct Callback : KeyvaDB::GetCallback
+ {
+ explicit Callback (MemoryBlock& block)
+ : m_block (block)
+ {
+ }
+
+ void* getStorageForValue (int valueBytes)
+ {
+ m_size = valueBytes;
+ m_block.ensureSize (valueBytes);
+
+ return m_block.getData ();
+ }
+
+ void const* getData () const noexcept
+ {
+ return m_block.getData ();
+ }
+
+ size_t getSize () const noexcept
+ {
+ return m_size;
+ }
+
+ private:
+ MemoryBlock& m_block;
+ size_t m_size;
+ };
+
+ MemoryPool::ScopedItem item (m_memoryPool);
+ MemoryBlock& block (item.getObject ());
+
+ Callback cb (block);
+
+ // VFALCO TODO Can't we get KeyvaDB to provide a proper status?
+ //
+ bool const found = m_db->get (key, &cb);
+
+ if (found)
+ {
+ NodeStore::DecodedBlob decoded (key, cb.getData (), cb.getSize ());
+
+ if (decoded.wasOk ())
+ {
+ *pObject = decoded.createObject ();
+
+ status = ok;
+ }
+ else
+ {
+ status = dataCorrupt;
+ }
+ }
+ else
+ {
+ status = notFound;
+ }
+
+ return status;
+ }
+
+ void store (NodeObject::ref object)
+ {
+ EncodedBlobPool::ScopedItem item (m_blobPool);
+ NodeStore::EncodedBlob& encoded (item.getObject ());
+
+ encoded.prepare (object);
+
+ m_db->put (encoded.getKey (), encoded.getData (), encoded.getSize ());
+ }
+
+ void storeBatch (NodeStore::Batch const& batch)
+ {
+ for (int i = 0; i < batch.size (); ++i)
+ store (batch [i]);
+ }
+
+ void visitAll (VisitCallback& callback)
+ {
+ // VFALCO TODO Implement this!
+ //
+ bassertfalse;
+ //m_db->visitAll ();
+ }
+
+ int getWriteLoad ()
+ {
+ // we dont do pending writes
+ return 0;
+ }
+
+ //--------------------------------------------------------------------------
+
+private:
+ size_t const m_keyBytes;
+ NodeStore::Scheduler& m_scheduler;
+ String m_path;
+ ScopedPointer m_db;
+ MemoryPool m_memoryPool;
+ EncodedBlobPool m_blobPool;
+};
+
+//------------------------------------------------------------------------------
+
+KeyvaDBBackendFactory::KeyvaDBBackendFactory ()
+{
+}
+
+KeyvaDBBackendFactory::~KeyvaDBBackendFactory ()
+{
+}
+
+KeyvaDBBackendFactory& KeyvaDBBackendFactory::getInstance ()
+{
+ static KeyvaDBBackendFactory instance;
+
+ return instance;
+}
+
+String KeyvaDBBackendFactory::getName () const
+{
+ return "KeyvaDB";
+}
+
+NodeStore::Backend* KeyvaDBBackendFactory::createInstance (
+ size_t keyBytes,
+ StringPairArray const& keyValues,
+ NodeStore::Scheduler& scheduler)
+{
+ return new KeyvaDBBackendFactory::Backend (keyBytes, keyValues, scheduler);
+}
+
+//------------------------------------------------------------------------------
+
diff --git a/modules/ripple_app/node/ripple_KeyvaDBBackendFactory.h b/modules/ripple_app/node/ripple_KeyvaDBBackendFactory.h
new file mode 100644
index 0000000000..40e76f1994
--- /dev/null
+++ b/modules/ripple_app/node/ripple_KeyvaDBBackendFactory.h
@@ -0,0 +1,30 @@
+//------------------------------------------------------------------------------
+/*
+ Copyright (c) 2011-2013, OpenCoin, Inc.
+*/
+//==============================================================================
+
+#ifndef RIPPLE_KEYVABACKENDFACTORY_H_INCLUDED
+#define RIPPLE_KEYVABACKENDFACTORY_H_INCLUDED
+
+/** Factory to produce KeyvaDB backends for the NodeStore.
+*/
+class KeyvaDBBackendFactory : public NodeStore::BackendFactory
+{
+private:
+ class Backend;
+
+ KeyvaDBBackendFactory ();
+ ~KeyvaDBBackendFactory ();
+
+public:
+ static KeyvaDBBackendFactory& getInstance ();
+
+ String getName () const;
+
+ NodeStore::Backend* createInstance (size_t keyBytes,
+ StringPairArray const& keyValues,
+ NodeStore::Scheduler& scheduler);
+};
+
+#endif
diff --git a/modules/ripple_app/node/ripple_LevelDBBackendFactory.cpp b/modules/ripple_app/node/ripple_LevelDBBackendFactory.cpp
index b00fd0f287..0beb2d5c1b 100644
--- a/modules/ripple_app/node/ripple_LevelDBBackendFactory.cpp
+++ b/modules/ripple_app/node/ripple_LevelDBBackendFactory.cpp
@@ -4,23 +4,38 @@
*/
//==============================================================================
-class LevelDBBackendFactory::Backend : public NodeStore::Backend
+class LevelDBBackendFactory::Backend
+ : public NodeStore::Backend
+ , public NodeStore::BatchWriter::Callback
+ , LeakChecked
{
public:
- Backend (StringPairArray const& keyValues)
- : mName(keyValues ["path"].toStdString ())
- , mDB(NULL)
+ typedef RecycledObjectPool StringPool;
+
+ //--------------------------------------------------------------------------
+
+ Backend (int keyBytes,
+ StringPairArray const& keyValues,
+ NodeStore::Scheduler& scheduler)
+ : m_keyBytes (keyBytes)
+ , m_scheduler (scheduler)
+ , m_batch (*this, scheduler)
+ , m_name (keyValues ["path"].toStdString ())
{
- if (mName.empty())
- throw std::runtime_error ("Missing path in LevelDB backend");
+ if (m_name.empty())
+ Throw (std::runtime_error ("Missing path in LevelDB backend"));
leveldb::Options options;
options.create_if_missing = true;
if (keyValues["cache_mb"].isEmpty())
+ {
options.block_cache = leveldb::NewLRUCache (theConfig.getSize (siHashNodeDBCache) * 1024 * 1024);
+ }
else
+ {
options.block_cache = leveldb::NewLRUCache (keyValues["cache_mb"].getIntValue() * 1024L * 1024L);
+ }
if (keyValues["filter_bits"].isEmpty())
{
@@ -28,94 +43,171 @@ public:
options.filter_policy = leveldb::NewBloomFilterPolicy (10);
}
else if (keyValues["filter_bits"].getIntValue() != 0)
+ {
options.filter_policy = leveldb::NewBloomFilterPolicy (keyValues["filter_bits"].getIntValue());
+ }
- if (!keyValues["open_files"].isEmpty())
+ if (! keyValues["open_files"].isEmpty())
+ {
options.max_open_files = keyValues["open_files"].getIntValue();
+ }
- leveldb::Status status = leveldb::DB::Open (options, mName, &mDB);
- if (!status.ok () || !mDB)
- throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString()));
+ leveldb::DB* db = nullptr;
+ leveldb::Status status = leveldb::DB::Open (options, m_name, &db);
+ if (!status.ok () || !db)
+ Throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString()));
+
+ m_db = db;
}
~Backend ()
{
- delete mDB;
}
- std::string getDataBaseName()
+ std::string getName()
{
- return mName;
+ return m_name;
}
- bool bulkStore (const std::vector< NodeObject::pointer >& objs)
- {
- leveldb::WriteBatch batch;
+ //--------------------------------------------------------------------------
+
+ Status fetch (void const* key, NodeObject::Ptr* pObject)
+ {
+ pObject->reset ();
+
+ Status status (ok);
+
+ leveldb::ReadOptions const options;
+ leveldb::Slice const slice (static_cast (key), m_keyBytes);
- BOOST_FOREACH (NodeObject::ref obj, objs)
{
- Blob blob (toBlob (obj));
- batch.Put (
- leveldb::Slice (reinterpret_cast(obj->getHash ().begin ()), 256 / 8),
- leveldb::Slice (reinterpret_cast(&blob.front ()), blob.size ()));
+ // These are reused std::string objects,
+ // required for leveldb's funky interface.
+ //
+ StringPool::ScopedItem item (m_stringPool);
+ std::string& string = item.getObject ();
+
+ leveldb::Status getStatus = m_db->Get (options, slice, &string);
+
+ if (getStatus.ok ())
+ {
+ NodeStore::DecodedBlob decoded (key, string.data (), string.size ());
+
+ if (decoded.wasOk ())
+ {
+ *pObject = decoded.createObject ();
+ }
+ else
+ {
+ // Decoding failed, probably corrupted!
+ //
+ status = dataCorrupt;
+ }
+ }
+ else
+ {
+ if (getStatus.IsCorruption ())
+ {
+ status = dataCorrupt;
+ }
+ else if (getStatus.IsNotFound ())
+ {
+ status = notFound;
+ }
+ else
+ {
+ status = unknown;
+ }
+ }
}
- return mDB->Write (leveldb::WriteOptions (), &batch).ok ();
+
+ return status;
}
- NodeObject::pointer retrieve (uint256 const& hash)
+ void store (NodeObject::ref object)
{
- std::string sData;
- if (!mDB->Get (leveldb::ReadOptions (),
- leveldb::Slice (reinterpret_cast(hash.begin ()), 256 / 8), &sData).ok ())
+ m_batch.store (object);
+ }
+
+ void storeBatch (NodeStore::Batch const& batch)
+ {
+ leveldb::WriteBatch wb;
+
{
- return NodeObject::pointer();
+ NodeStore::EncodedBlob::Pool::ScopedItem item (m_blobPool);
+
+ BOOST_FOREACH (NodeObject::ref object, batch)
+ {
+ item.getObject ().prepare (object);
+
+ wb.Put (
+ leveldb::Slice (reinterpret_cast (item.getObject ().getKey ()),
+ m_keyBytes),
+ leveldb::Slice (reinterpret_cast (item.getObject ().getData ()),
+ item.getObject ().getSize ()));
+ }
}
- return fromBinary(hash, &sData[0], sData.size ());
+
+ leveldb::WriteOptions const options;
+
+ m_db->Write (options, &wb).ok ();
}
- void visitAll (FUNCTION_TYPE func)
+ void visitAll (VisitCallback& callback)
{
- leveldb::Iterator* it = mDB->NewIterator (leveldb::ReadOptions ());
+ leveldb::ReadOptions const options;
+
+ ScopedPointer it (m_db->NewIterator (options));
+
for (it->SeekToFirst (); it->Valid (); it->Next ())
{
- if (it->key ().size () == 256 / 8)
+ if (it->key ().size () == m_keyBytes)
{
- uint256 hash;
- memcpy(hash.begin(), it->key ().data(), 256 / 8);
- func (fromBinary (hash, it->value ().data (), it->value ().size ()));
+ NodeStore::DecodedBlob decoded (it->key ().data (),
+ it->value ().data (),
+ it->value ().size ());
+
+ if (decoded.wasOk ())
+ {
+ NodeObject::Ptr object (decoded.createObject ());
+
+ callback.visitObject (object);
+ }
+ else
+ {
+ // Uh oh, corrupted data!
+ WriteLog (lsFATAL, NodeObject) << "Corrupt NodeObject #" << uint256 (it->key ().data ());
+ }
+ }
+ else
+ {
+ // VFALCO NOTE What does it mean to find an
+ // incorrectly sized key? Corruption?
+ WriteLog (lsFATAL, NodeObject) << "Bad key size = " << it->key ().size ();
}
}
}
- Blob toBlob(NodeObject::ref obj)
+ int getWriteLoad ()
{
- Blob rawData (9 + obj->getData ().size ());
- unsigned char* bufPtr = &rawData.front();
-
- *reinterpret_cast (bufPtr + 0) = ntohl (obj->getIndex ());
- *reinterpret_cast (bufPtr + 4) = ntohl (obj->getIndex ());
- * (bufPtr + 8) = static_cast (obj->getType ());
- memcpy (bufPtr + 9, &obj->getData ().front (), obj->getData ().size ());
-
- return rawData;
+ return m_batch.getWriteLoad ();
}
- NodeObject::pointer fromBinary(uint256 const& hash,
- char const* data, int size)
+ //--------------------------------------------------------------------------
+
+ void writeBatch (NodeStore::Batch const& batch)
{
- if (size < 9)
- throw std::runtime_error ("undersized object");
-
- uint32 index = htonl (*reinterpret_cast (data));
- int htype = data[8];
-
- return boost::make_shared (static_cast (htype), index,
- data + 9, size - 9, hash);
+ storeBatch (batch);
}
private:
- std::string mName;
- leveldb::DB* mDB;
+ size_t const m_keyBytes;
+ NodeStore::Scheduler& m_scheduler;
+ NodeStore::BatchWriter m_batch;
+ StringPool m_stringPool;
+ NodeStore::EncodedBlob::Pool m_blobPool;
+ std::string m_name;
+ ScopedPointer m_db;
};
//------------------------------------------------------------------------------
@@ -140,9 +232,12 @@ String LevelDBBackendFactory::getName () const
return "LevelDB";
}
-NodeStore::Backend* LevelDBBackendFactory::createInstance (StringPairArray const& keyValues)
+NodeStore::Backend* LevelDBBackendFactory::createInstance (
+ size_t keyBytes,
+ StringPairArray const& keyValues,
+ NodeStore::Scheduler& scheduler)
{
- return new LevelDBBackendFactory::Backend (keyValues);
+ return new LevelDBBackendFactory::Backend (keyBytes, keyValues, scheduler);
}
//------------------------------------------------------------------------------
diff --git a/modules/ripple_app/node/ripple_LevelDBBackendFactory.h b/modules/ripple_app/node/ripple_LevelDBBackendFactory.h
index b2f324f927..3646125d1d 100644
--- a/modules/ripple_app/node/ripple_LevelDBBackendFactory.h
+++ b/modules/ripple_app/node/ripple_LevelDBBackendFactory.h
@@ -21,7 +21,10 @@ public:
static LevelDBBackendFactory& getInstance ();
String getName () const;
- NodeStore::Backend* createInstance (StringPairArray const& keyValues);
+
+ NodeStore::Backend* createInstance (size_t keyBytes,
+ StringPairArray const& keyValues,
+ NodeStore::Scheduler& scheduler);
};
#endif
diff --git a/modules/ripple_app/node/ripple_MdbBackendFactory.cpp b/modules/ripple_app/node/ripple_MdbBackendFactory.cpp
index 0b74349ab3..c454380f8f 100644
--- a/modules/ripple_app/node/ripple_MdbBackendFactory.cpp
+++ b/modules/ripple_app/node/ripple_MdbBackendFactory.cpp
@@ -6,171 +6,242 @@
#if RIPPLE_MDB_AVAILABLE
-class MdbBackendFactory::Backend : public NodeStore::Backend
+class MdbBackendFactory::Backend
+ : public NodeStore::Backend
+ , public NodeStore::BatchWriter::Callback
+ , LeakChecked
{
public:
- explicit Backend (StringPairArray const& keyValues)
- : m_env (nullptr)
+ typedef NodeStore::Batch Batch;
+ typedef NodeStore::EncodedBlob EncodedBlob;
+ typedef NodeStore::DecodedBlob DecodedBlob;
+
+ explicit Backend (size_t keyBytes,
+ StringPairArray const& keyValues,
+ NodeStore::Scheduler& scheduler)
+ : m_keyBytes (keyBytes)
+ , m_scheduler (scheduler)
+ , m_batch (*this, scheduler)
+ , m_env (nullptr)
{
- if (keyValues ["path"].isEmpty ())
- throw std::runtime_error ("Missing path in MDB backend");
+ String path (keyValues ["path"]);
- int error = 0;
+ if (path.isEmpty ())
+ Throw (std::runtime_error ("Missing path in MDB backend"));
- error = mdb_env_create (&m_env);
+ m_basePath = path.toStdString();
- if (error == 0) // Should use the size of the file plus the free space on the disk
- error = mdb_env_set_mapsize(m_env, 512L * 1024L * 1024L * 1024L);
+ // Regarding the path supplied to mdb_env_open:
+ // This directory must already exist and be writable.
+ //
+ File dir (File::getCurrentWorkingDirectory().getChildFile (path));
+ Result result = dir.createDirectory ();
- if (error == 0)
- error = mdb_env_open (
- m_env,
- keyValues ["path"].toStdString().c_str (),
- MDB_NOTLS,
- 0664);
+ if (result.wasOk ())
+ {
+ int error = mdb_env_create (&m_env);
- MDB_txn * txn;
- if (error == 0)
- error = mdb_txn_begin(m_env, NULL, 0, &txn);
- if (error == 0)
- error = mdb_dbi_open(txn, NULL, 0, &m_dbi);
- if (error == 0)
- error = mdb_txn_commit(txn);
+ // Should use the size of the file plus the free space on the disk
+ if (error == 0)
+ error = mdb_env_set_mapsize (m_env, 512L * 1024L * 1024L * 1024L);
+ if (error == 0)
+ error = mdb_env_open (
+ m_env,
+ m_basePath.c_str (),
+ MDB_NOTLS,
+ 0664);
- if (error != 0)
+ MDB_txn* txn;
+
+ if (error == 0)
+ error = mdb_txn_begin (m_env, NULL, 0, &txn);
+
+ if (error == 0)
+ error = mdb_dbi_open (txn, NULL, 0, &m_dbi);
+
+ if (error == 0)
+ error = mdb_txn_commit (txn);
+
+ if (error != 0)
+ {
+ String s;
+ s << "Error #" << error << " creating mdb environment";
+ Throw (std::runtime_error (s.toStdString ()));
+ }
+ }
+ else
{
String s;
- s << "Error #" << error << " creating mdb environment";
- throw std::runtime_error (s.toStdString ());
+ s << "MDB Backend failed to create directory, " << result.getErrorMessage ();
+ Throw (std::runtime_error (s.toStdString().c_str()));
}
- m_name = keyValues ["path"].toStdString();
}
~Backend ()
{
if (m_env != nullptr)
{
- mdb_dbi_close(m_env, m_dbi);
+ mdb_dbi_close (m_env, m_dbi);
mdb_env_close (m_env);
}
}
- std::string getDataBaseName()
+ std::string getName()
{
- return m_name;
+ return m_basePath;
}
- bool bulkStore (std::vector const& objs)
+ //--------------------------------------------------------------------------
+
+ template
+ unsigned char* mdb_cast (T* p)
{
- MDB_txn *txn = nullptr;
- int rc = 0;
+ return const_cast (static_cast (p));
+ }
- rc = mdb_txn_begin(m_env, NULL, 0, &txn);
+ Status fetch (void const* key, NodeObject::Ptr* pObject)
+ {
+ pObject->reset ();
- if (rc == 0)
+ Status status (ok);
+
+ MDB_txn* txn = nullptr;
+
+ int error = 0;
+
+ error = mdb_txn_begin (m_env, NULL, MDB_RDONLY, &txn);
+
+ if (error == 0)
{
- BOOST_FOREACH (NodeObject::ref obj, objs)
- {
- MDB_val key, data;
- Blob blob (toBlob (obj));
+ MDB_val dbkey;
+ MDB_val data;
- key.mv_size = (256 / 8);
- key.mv_data = const_cast(obj->getHash().begin());
+ dbkey.mv_size = m_keyBytes;
+ dbkey.mv_data = mdb_cast (key);
- data.mv_size = blob.size();
- data.mv_data = &blob.front();
+ error = mdb_get (txn, m_dbi, &dbkey, &data);
- rc = mdb_put(txn, m_dbi, &key, &data, 0);
- if (rc != 0)
+ if (error == 0)
+ {
+ DecodedBlob decoded (key, data.mv_data, data.mv_size);
+
+ if (decoded.wasOk ())
{
- assert(false);
+ *pObject = decoded.createObject ();
+ }
+ else
+ {
+ status = dataCorrupt;
+ }
+ }
+ else if (error == MDB_NOTFOUND)
+ {
+ status = notFound;
+ }
+ else
+ {
+ status = unknown;
+
+ WriteLog (lsWARNING, NodeObject) << "MDB txn failed, code=" << error;
+ }
+
+ mdb_txn_abort (txn);
+ }
+ else
+ {
+ status = unknown;
+
+ WriteLog (lsWARNING, NodeObject) << "MDB txn failed, code=" << error;
+ }
+
+ return status;
+ }
+
+ void store (NodeObject::ref object)
+ {
+ m_batch.store (object);
+ }
+
+ void storeBatch (Batch const& batch)
+ {
+ MDB_txn* txn = nullptr;
+
+ int error = 0;
+
+ error = mdb_txn_begin (m_env, NULL, 0, &txn);
+
+ if (error == 0)
+ {
+ EncodedBlob::Pool::ScopedItem item (m_blobPool);
+
+ BOOST_FOREACH (NodeObject::Ptr const& object, batch)
+ {
+ EncodedBlob& encoded (item.getObject ());
+
+ encoded.prepare (object);
+
+ MDB_val key;
+ key.mv_size = m_keyBytes;
+ key.mv_data = mdb_cast (encoded.getKey ());
+
+ MDB_val data;
+ data.mv_size = encoded.getSize ();
+ data.mv_data = mdb_cast (encoded.getData ());
+
+ error = mdb_put (txn, m_dbi, &key, &data, 0);
+
+ if (error != 0)
+ {
+ WriteLog (lsWARNING, NodeObject) << "mdb_put failed, error=" << error;
break;
}
- }
+ }
+
+ if (error == 0)
+ {
+ error = mdb_txn_commit(txn);
+
+ if (error != 0)
+ {
+ WriteLog (lsWARNING, NodeObject) << "mdb_txn_commit failed, error=" << error;
+ }
+ }
+ else
+ {
+ mdb_txn_abort (txn);
+ }
}
else
- assert(false);
-
- if (rc == 0)
- rc = mdb_txn_commit(txn);
- else if (txn)
- mdb_txn_abort(txn);
-
- assert(rc == 0);
- return rc == 0;
- }
-
- NodeObject::pointer retrieve (uint256 const& hash)
- {
- NodeObject::pointer ret;
-
- MDB_txn *txn = nullptr;
- int rc = 0;
-
- rc = mdb_txn_begin(m_env, NULL, MDB_RDONLY, &txn);
-
- if (rc == 0)
{
- MDB_val key, data;
-
- key.mv_size = (256 / 8);
- key.mv_data = const_cast(hash.begin());
-
- rc = mdb_get(txn, m_dbi, &key, &data);
- if (rc == 0)
- ret = fromBinary(hash, static_cast(data.mv_data), data.mv_size);
- else
- assert(rc == MDB_NOTFOUND);
+ WriteLog (lsWARNING, NodeObject) << "mdb_txn_begin failed, error=" << error;
}
- else
- assert(false);
-
- mdb_txn_abort(txn);
-
- return ret;
}
- void visitAll (FUNCTION_TYPE func)
- { // WRITEME
- assert(false);
- }
-
- Blob toBlob (NodeObject::ref obj) const
+ void visitAll (VisitCallback& callback)
{
- Blob rawData (9 + obj->getData ().size ());
- unsigned char* bufPtr = &rawData.front();
-
- *reinterpret_cast (bufPtr + 0) = ntohl (obj->getIndex ());
-
- *reinterpret_cast (bufPtr + 4) = ntohl (obj->getIndex ());
-
- *(bufPtr + 8) = static_cast (obj->getType ());
-
- memcpy (bufPtr + 9, &obj->getData ().front (), obj->getData ().size ());
-
- return rawData;
+ // VFALCO TODO Implement this!
+ bassertfalse;
}
- NodeObject::pointer fromBinary (uint256 const& hash, char const* data, int size) const
+ int getWriteLoad ()
{
- if (size < 9)
- throw std::runtime_error ("undersized object");
+ return m_batch.getWriteLoad ();
+ }
- uint32 const index = htonl (*reinterpret_cast (data));
+ //--------------------------------------------------------------------------
- int const htype = data [8];
-
- return boost::make_shared (
- static_cast (htype),
- index,
- data + 9,
- size - 9,
- hash);
+ void writeBatch (Batch const& batch)
+ {
+ storeBatch (batch);
}
private:
- std::string m_name;
+ size_t const m_keyBytes;
+ NodeStore::Scheduler& m_scheduler;
+ NodeStore::BatchWriter m_batch;
+ NodeStore::EncodedBlob::Pool m_blobPool;
+ std::string m_basePath;
MDB_env* m_env;
MDB_dbi m_dbi;
};
@@ -197,9 +268,12 @@ String MdbBackendFactory::getName () const
return "mdb";
}
-NodeStore::Backend* MdbBackendFactory::createInstance (StringPairArray const& keyValues)
+NodeStore::Backend* MdbBackendFactory::createInstance (
+ size_t keyBytes,
+ StringPairArray const& keyValues,
+ NodeStore::Scheduler& scheduler)
{
- return new MdbBackendFactory::Backend (keyValues);
+ return new MdbBackendFactory::Backend (keyBytes, keyValues, scheduler);
}
#endif
diff --git a/modules/ripple_app/node/ripple_MdbBackendFactory.h b/modules/ripple_app/node/ripple_MdbBackendFactory.h
index 702ca3a14a..2e1cd7db65 100644
--- a/modules/ripple_app/node/ripple_MdbBackendFactory.h
+++ b/modules/ripple_app/node/ripple_MdbBackendFactory.h
@@ -25,7 +25,10 @@ public:
static MdbBackendFactory& getInstance ();
String getName () const;
- NodeStore::Backend* createInstance (StringPairArray const& keyValues);
+
+ NodeStore::Backend* createInstance (size_t keyBytes,
+ StringPairArray const& keyValues,
+ NodeStore::Scheduler& scheduler);
};
#endif
diff --git a/modules/ripple_app/node/ripple_NodeObject.cpp b/modules/ripple_app/node/ripple_NodeObject.cpp
index ac8bce22ee..1d3f282762 100644
--- a/modules/ripple_app/node/ripple_NodeObject.cpp
+++ b/modules/ripple_app/node/ripple_NodeObject.cpp
@@ -6,30 +6,32 @@
SETUP_LOG (NodeObject)
-NodeObject::NodeObject (
- NodeObjectType type,
- LedgerIndex ledgerIndex,
- Blob const& binaryDataToCopy,
- uint256 const& hash)
- : mType (type)
- , mHash (hash)
- , mLedgerIndex (ledgerIndex)
- , mData (binaryDataToCopy)
-{
-}
+//------------------------------------------------------------------------------
NodeObject::NodeObject (
NodeObjectType type,
LedgerIndex ledgerIndex,
- void const* bufferToCopy,
- int bytesInBuffer,
- uint256 const& hash)
+ Blob& data,
+ uint256 const& hash,
+ PrivateAccess)
: mType (type)
, mHash (hash)
, mLedgerIndex (ledgerIndex)
- , mData (static_cast (bufferToCopy),
- static_cast (bufferToCopy) + bytesInBuffer)
{
+ // Take over the caller's buffer
+ mData.swap (data);
+}
+
+NodeObject::Ptr NodeObject::createObject (
+ NodeObjectType type,
+ LedgerIndex ledgerIndex,
+ Blob& data,
+ uint256 const & hash)
+{
+ // The boost::ref is important or
+ // else it will be passed by value!
+ return boost::make_shared (
+ type, ledgerIndex, boost::ref (data), hash, PrivateAccess ());
}
NodeObjectType NodeObject::getType () const
@@ -51,3 +53,39 @@ Blob const& NodeObject::getData () const
{
return mData;
}
+
+bool NodeObject::isCloneOf (NodeObject::Ptr const& other) const
+{
+ if (mType != other->mType)
+ return false;
+
+ if (mHash != other->mHash)
+ return false;
+
+ if (mLedgerIndex != other->mLedgerIndex)
+ return false;
+
+ if (mData != other->mData)
+ return false;
+
+ return true;
+}
+
+//------------------------------------------------------------------------------
+
+class NodeObjectTests : public UnitTest
+{
+public:
+
+ NodeObjectTests () : UnitTest ("NodeObject", "ripple")
+ {
+ }
+
+
+ void runTest ()
+ {
+ }
+};
+
+static NodeObjectTests nodeObjectTests;
+
diff --git a/modules/ripple_app/node/ripple_NodeObject.h b/modules/ripple_app/node/ripple_NodeObject.h
index b889666f48..7bbf7dd584 100644
--- a/modules/ripple_app/node/ripple_NodeObject.h
+++ b/modules/ripple_app/node/ripple_NodeObject.h
@@ -34,27 +34,60 @@ class NodeObject : public CountedObject
public:
static char const* getCountedObjectName () { return "NodeObject"; }
+ enum
+ {
+ /** Size of the fixed keys, in bytes.
+
+ We use a 256-bit hash for the keys.
+
+ @see NodeObject
+ */
+ keyBytes = 32,
+ };
+
+ /** The type used to hold the hash.
+
+ The hashes are fixed size, SHA256.
+
+ @note The key size can be retrieved with `Hash::sizeInBytes`
+ */
+ typedef UnsignedInteger <32> Hash;
+
+ // Please use this one. For a reference use Ptr const&
+ typedef boost::shared_ptr Ptr;
+
+ // These are DEPRECATED, type names are capitalized.
typedef boost::shared_ptr pointer;
typedef pointer const& ref;
- /** Create from a vector of data.
-
- @note A copy of the data is created.
- */
+private:
+ // This hack is used to make the constructor effectively private
+ // except for when we use it in the call to make_shared.
+ // There's no portable way to make make_shared<> a friend work.
+ struct PrivateAccess { };
+public:
+ // This constructor is private, use createObject instead.
NodeObject (NodeObjectType type,
- LedgerIndex ledgerIndex,
- Blob const & binaryDataToCopy,
- uint256 const & hash);
+ LedgerIndex ledgerIndex,
+ Blob& data,
+ uint256 const& hash,
+ PrivateAccess);
- /** Create from an area of memory.
+ /** Create an object from fields.
- @note A copy of the data is created.
+ The caller's variable is modified during this call. The
+ underlying storage for the Blob is taken over by the NodeObject.
+
+ @param type The type of object.
+ @param ledgerIndex The ledger in which this object appears.
+ @param data A buffer containing the payload. The caller's variable
+ is overwritten.
+ @param hash The 256-bit hash of the payload data.
*/
- NodeObject (NodeObjectType type,
- LedgerIndex ledgerIndex,
- void const * bufferToCopy,
- int bytesInBuffer,
- uint256 const & hash);
+ static Ptr createObject (NodeObjectType type,
+ LedgerIndex ledgerIndex,
+ Blob& data,
+ uint256 const& hash);
/** Retrieve the type of this object.
*/
@@ -73,11 +106,30 @@ public:
*/
Blob const& getData () const;
+ /** See if this object has the same data as another object.
+ */
+ bool isCloneOf (NodeObject::Ptr const& other) const;
+
+ /** Binary function that satisfies the strict-weak-ordering requirement.
+
+ This compares the hashes of both objects and returns true if
+ the first hash is considered to go before the second.
+
+ @see std::sort
+ */
+ struct LessThan
+ {
+ inline bool operator() (NodeObject::Ptr const& lhs, NodeObject::Ptr const& rhs) const noexcept
+ {
+ return lhs->getHash () < rhs->getHash ();
+ }
+ };
+
private:
- NodeObjectType const mType;
- uint256 const mHash;
- LedgerIndex const mLedgerIndex;
- Blob const mData;
+ NodeObjectType mType;
+ uint256 mHash;
+ LedgerIndex mLedgerIndex;
+ Blob mData;
};
#endif
diff --git a/modules/ripple_app/node/ripple_NodeStore.cpp b/modules/ripple_app/node/ripple_NodeStore.cpp
index 960e0d805f..b0ddd751d7 100644
--- a/modules/ripple_app/node/ripple_NodeStore.cpp
+++ b/modules/ripple_app/node/ripple_NodeStore.cpp
@@ -4,211 +4,164 @@
*/
//==============================================================================
-Array NodeStore::s_factories;
-
-NodeStore::NodeStore (String backendParameters, String fastBackendParameters, int cacheSize, int cacheAge)
- : m_backend (createBackend (backendParameters))
- , mCache ("NodeStore", cacheSize, cacheAge)
- , mNegativeCache ("HashedObjectNegativeCache", 0, 120)
+NodeStore::DecodedBlob::DecodedBlob (void const* key, void const* value, int valueBytes)
{
- if (fastBackendParameters.isNotEmpty ())
- m_fastBackend = createBackend (fastBackendParameters);
-}
+ /* Data format:
-void NodeStore::addBackendFactory (BackendFactory& factory)
-{
- s_factories.add (&factory);
-}
+ Bytes
-float NodeStore::getCacheHitRate ()
-{
- return mCache.getHitRate ();
-}
+ 0...3 LedgerIndex 32-bit big endian integer
+ 4...7 Unused? An unused copy of the LedgerIndex
+ 8 char One of NodeObjectType
+ 9...end The body of the object data
+ */
-void NodeStore::tune (int size, int age)
-{
- mCache.setTargetSize (size);
- mCache.setTargetAge (age);
-}
+ m_success = false;
+ m_key = key;
+ // VFALCO NOTE Ledger indexes should have started at 1
+ m_ledgerIndex = LedgerIndex (-1);
+ m_objectType = hotUNKNOWN;
+ m_objectData = nullptr;
+ m_dataBytes = bmax (0, valueBytes - 9);
-void NodeStore::sweep ()
-{
- mCache.sweep ();
- mNegativeCache.sweep ();
-}
-
-void NodeStore::waitWrite ()
-{
- m_backend->waitWrite ();
- if (m_fastBackend)
- m_fastBackend->waitWrite ();
-}
-
-int NodeStore::getWriteLoad ()
-{
- return m_backend->getWriteLoad ();
-}
-
-bool NodeStore::store (NodeObjectType type, uint32 index,
- Blob const& data, uint256 const& hash)
-{
- // return: false = already in cache, true = added to cache
- if (mCache.touch (hash))
- return false;
-
-#ifdef PARANOID
- assert (hash == Serializer::getSHA512Half (data));
-#endif
-
- NodeObject::pointer object = boost::make_shared (type, index, data, hash);
-
- if (!mCache.canonicalize (hash, object))
+ if (valueBytes > 4)
{
- m_backend->store (object);
- if (m_fastBackend)
- m_fastBackend->store (object);
+ LedgerIndex const* index = static_cast (value);
+ m_ledgerIndex = ByteOrder::swapIfLittleEndian (*index);
}
- mNegativeCache.del (hash);
- return true;
-}
+ // VFALCO NOTE What about bytes 4 through 7 inclusive?
-NodeObject::pointer NodeStore::retrieve (uint256 const& hash)
-{
- NodeObject::pointer obj = mCache.fetch (hash);
-
- if (obj || mNegativeCache.isPresent (hash))
- return obj;
-
- if (m_fastBackend)
+ if (valueBytes > 8)
{
- obj = m_fastBackend->retrieve (hash);
+ unsigned char const* byte = static_cast (value);
+ m_objectType = static_cast (byte [8]);
+ }
- if (obj)
+ if (valueBytes > 9)
+ {
+ m_objectData = static_cast (value) + 9;
+
+ switch (m_objectType)
{
- mCache.canonicalize (hash, obj);
- return obj;
+ case hotUNKNOWN:
+ default:
+ break;
+
+ case hotLEDGER:
+ case hotTRANSACTION:
+ case hotACCOUNT_NODE:
+ case hotTRANSACTION_NODE:
+ m_success = true;
+ break;
}
}
-
- {
- LoadEvent::autoptr event (getApp().getJobQueue ().getLoadEventAP (jtHO_READ, "HOS::retrieve"));
- obj = m_backend->retrieve(hash);
-
- if (!obj)
- {
- mNegativeCache.add (hash);
- return obj;
- }
- }
-
- mCache.canonicalize (hash, obj);
-
- if (m_fastBackend)
- m_fastBackend->store(obj);
-
- WriteLog (lsTRACE, NodeObject) << "HOS: " << hash << " fetch: in db";
- return obj;
}
-void NodeStore::importVisitor (
- std::vector & objects,
- NodeObject::pointer object)
+NodeObject::Ptr NodeStore::DecodedBlob::createObject ()
{
- if (objects.size() >= 128)
- {
- m_backend->bulkStore (objects);
+ bassert (m_success);
- objects.clear ();
- objects.reserve (128);
+ NodeObject::Ptr object;
+
+ if (m_success)
+ {
+ Blob data (m_dataBytes);
+
+ memcpy (data.data (), m_objectData, m_dataBytes);
+
+ object = NodeObject::createObject (
+ m_objectType, m_ledgerIndex, data, uint256 (m_key));
}
- objects.push_back (object);
+ return object;
}
-int NodeStore::import (String sourceBackendParameters)
+//------------------------------------------------------------------------------
+
+void NodeStore::EncodedBlob::prepare (NodeObject::Ptr const& object)
{
- ScopedPointer srcBackend (createBackend (sourceBackendParameters));
+ m_key = object->getHash ().begin ();
- WriteLog (lsWARNING, NodeObject) <<
- "Node import from '" << srcBackend->getDataBaseName() << "' to '"
- << m_backend->getDataBaseName() << "'.";
+ // This is how many bytes we need in the flat data
+ m_size = object->getData ().size () + 9;
- std::vector objects;
+ m_data.ensureSize (m_size);
- objects.reserve (128);
+ // These sizes must be the same!
+ static_bassert (sizeof (uint32) == sizeof (object->getIndex ()));
- srcBackend->visitAll (BIND_TYPE (&NodeStore::importVisitor, this, boost::ref (objects), P_1));
-
- if (!objects.empty ())
- m_backend->bulkStore (objects);
-
- return 0;
-}
-
-NodeStore::Backend* NodeStore::createBackend (String const& parameters)
-{
- Backend* backend = nullptr;
-
- StringPairArray keyValues = parseKeyValueParameters (parameters, '|');
-
- String const& type = keyValues ["type"];
-
- if (type.isNotEmpty ())
{
- BackendFactory* factory = nullptr;
+ uint32* buf = static_cast (m_data.getData ());
- for (int i = 0; i < s_factories.size (); ++i)
- {
- if (s_factories [i]->getName () == type)
- {
- factory = s_factories [i];
- break;
- }
- }
-
- if (factory != nullptr)
- {
- backend = factory->createInstance (keyValues);
- }
- else
- {
- throw std::runtime_error ("unkown backend type");
- }
- }
- else
- {
- throw std::runtime_error ("missing backend type");
+ buf [0] = ByteOrder::swapIfLittleEndian (object->getIndex ());
+ buf [1] = ByteOrder::swapIfLittleEndian (object->getIndex ());
}
- return backend;
+ {
+ unsigned char* buf = static_cast (m_data.getData ());
+
+ buf [8] = static_cast (object->getType ());
+
+ memcpy (&buf [9], object->getData ().data (), object->getData ().size ());
+ }
}
-bool NodeStore::Backend::store (NodeObject::ref object)
+//==============================================================================
+
+NodeStore::BatchWriter::BatchWriter (Callback& callback, Scheduler& scheduler)
+ : m_callback (callback)
+ , m_scheduler (scheduler)
+ , mWriteGeneration (0)
+ , mWriteLoad (0)
+ , mWritePending (false)
{
- boost::mutex::scoped_lock sl (mWriteMutex);
+ mWriteSet.reserve (batchWritePreallocationSize);
+}
+
+NodeStore::BatchWriter::~BatchWriter ()
+{
+ waitForWriting ();
+}
+
+void NodeStore::BatchWriter::store (NodeObject::ref object)
+{
+ LockType::scoped_lock sl (mWriteMutex);
+
mWriteSet.push_back (object);
- if (!mWritePending)
+ if (! mWritePending)
{
mWritePending = true;
- getApp().getJobQueue ().addJob (jtWRITE, "NodeObject::store",
- BIND_TYPE (&NodeStore::Backend::bulkWrite, this, P_1));
+
+ m_scheduler.scheduleTask (this);
}
- return true;
}
-void NodeStore::Backend::bulkWrite (Job &)
+int NodeStore::BatchWriter::getWriteLoad ()
+{
+ LockType::scoped_lock sl (mWriteMutex);
+
+ return std::max (mWriteLoad, static_cast (mWriteSet.size ()));
+}
+
+void NodeStore::BatchWriter::performScheduledTask ()
+{
+ writeBatch ();
+}
+
+void NodeStore::BatchWriter::writeBatch ()
{
int setSize = 0;
- while (1)
+ for (;;)
{
std::vector< boost::shared_ptr > set;
- set.reserve (128);
+
+ set.reserve (batchWritePreallocationSize);
{
- boost::mutex::scoped_lock sl (mWriteMutex);
+ LockType::scoped_lock sl (mWriteMutex);
mWriteSet.swap (set);
assert (mWriteSet.empty ());
@@ -219,29 +172,1006 @@ void NodeStore::Backend::bulkWrite (Job &)
{
mWritePending = false;
mWriteLoad = 0;
+
+ // VFALCO NOTE Fix this function to not return from the middle
return;
}
+ // VFALCO NOTE On the first trip through, mWriteLoad will be 0.
+ // This is probably not intended. Perhaps the order
+ // of calls isn't quite right
+ //
mWriteLoad = std::max (setSize, static_cast (mWriteSet.size ()));
+
setSize = set.size ();
}
- bulkStore (set);
+ m_callback.writeBatch (set);
}
}
-void NodeStore::Backend::waitWrite ()
+void NodeStore::BatchWriter::waitForWriting ()
{
- boost::mutex::scoped_lock sl (mWriteMutex);
+ LockType::scoped_lock sl (mWriteMutex);
int gen = mWriteGeneration;
while (mWritePending && (mWriteGeneration == gen))
mWriteCondition.wait (sl);
}
-int NodeStore::Backend::getWriteLoad ()
-{
- boost::mutex::scoped_lock sl (mWriteMutex);
+//==============================================================================
- return std::max (mWriteLoad, static_cast (mWriteSet.size ()));
+class NodeStoreImp
+ : public NodeStore
+ , LeakChecked
+{
+public:
+ NodeStoreImp (Parameters const& backendParameters,
+ Parameters const& fastBackendParameters,
+ Scheduler& scheduler)
+ : m_scheduler (scheduler)
+ , m_backend (createBackend (backendParameters, scheduler))
+ , m_fastBackend ((fastBackendParameters.size () > 0)
+ ? createBackend (fastBackendParameters, scheduler) : nullptr)
+ , m_cache ("NodeStore", 16384, 300)
+ , m_negativeCache ("NoteStoreNegativeCache", 0, 120)
+ {
+ }
+
+ ~NodeStoreImp ()
+ {
+ }
+
+ String getName () const
+ {
+ return m_backend->getName ();
+ }
+
+ //------------------------------------------------------------------------------
+
+ NodeObject::Ptr fetch (uint256 const& hash)
+ {
+ // See if the object already exists in the cache
+ //
+ NodeObject::Ptr obj = m_cache.fetch (hash);
+
+ if (obj == nullptr)
+ {
+ // It's not in the cache, see if we can skip checking the db.
+ //
+ if (! m_negativeCache.isPresent (hash))
+ {
+ // There's still a chance it could be in one of the databases.
+
+ bool foundInFastBackend = false;
+
+ // Check the fast backend database if we have one
+ //
+ if (m_fastBackend != nullptr)
+ {
+ obj = fetchInternal (m_fastBackend, hash);
+
+ // If we found the object, avoid storing it again later.
+ if (obj != nullptr)
+ foundInFastBackend = true;
+ }
+
+ // Are we still without an object?
+ //
+ if (obj == nullptr)
+ {
+ // Yes so at last we will try the main database.
+ //
+ {
+ // Monitor this operation's load since it is expensive.
+ //
+ // VFALCO TODO Why is this an autoptr? Why can't it just be a plain old object?
+ //
+ // VFALCO NOTE Commented this out because it breaks the unit test!
+ //
+ //LoadEvent::autoptr event (getApp().getJobQueue ().getLoadEventAP (jtHO_READ, "HOS::retrieve"));
+
+ obj = fetchInternal (m_backend, hash);
+ }
+
+ // If it's not in the main database, remember that so we
+ // can skip the lookup for the same object again later.
+ //
+ if (obj == nullptr)
+ m_negativeCache.add (hash);
+ }
+
+ // Did we finally get something?
+ //
+ if (obj != nullptr)
+ {
+ // Yes, so canonicalize it. This solves the problem where
+ // more than one thread has its own copy of the same object.
+ //
+ m_cache.canonicalize (hash, obj);
+
+ if (! foundInFastBackend)
+ {
+ // If we have a fast back end, store it there for later.
+ //
+ if (m_fastBackend != nullptr)
+ m_fastBackend->store (obj);
+
+ // Since this was a 'hard' fetch, we will log it.
+ //
+ WriteLog (lsTRACE, NodeObject) << "HOS: " << hash << " fetch: in db";
+ }
+ }
+ }
+ else
+ {
+ // hash is known not to be in the database
+ }
+ }
+ else
+ {
+ // found it!
+ }
+
+ return obj;
+ }
+
+ NodeObject::Ptr fetchInternal (Backend* backend, uint256 const& hash)
+ {
+ NodeObject::Ptr object;
+
+ Backend::Status const status = backend->fetch (hash.begin (), &object);
+
+ switch (status)
+ {
+ case Backend::ok:
+ case Backend::notFound:
+ break;
+
+ case Backend::dataCorrupt:
+ // VFALCO TODO Deal with encountering corrupt data!
+ //
+ WriteLog (lsFATAL, NodeObject) << "Corrupt NodeObject #" << hash;
+ break;
+
+ default:
+ WriteLog (lsWARNING, NodeObject) << "Unknown status=" << status;
+ break;
+ }
+
+ return object;
+ }
+
+ //------------------------------------------------------------------------------
+
+ void store (NodeObjectType type,
+ uint32 index,
+ Blob& data,
+ uint256 const& hash)
+ {
+ bool const keyFoundAndObjectCached = m_cache.refreshIfPresent (hash);
+
+ // VFALCO NOTE What happens if the key is found, but the object
+ // fell out of the cache? We will end up passing it
+ // to the backend anyway.
+ //
+ if (! keyFoundAndObjectCached)
+ {
+ #if RIPPLE_VERIFY_NODEOBJECT_KEYS
+ assert (hash == Serializer::getSHA512Half (data));
+ #endif
+
+ NodeObject::Ptr object = NodeObject::createObject (
+ type, index, data, hash);
+
+ if (!m_cache.canonicalize (hash, object))
+ {
+ m_backend->store (object);
+
+ if (m_fastBackend)
+ m_fastBackend->store (object);
+ }
+
+ m_negativeCache.del (hash);
+ }
+ }
+
+ //------------------------------------------------------------------------------
+
+ float getCacheHitRate ()
+ {
+ return m_cache.getHitRate ();
+ }
+
+ void tune (int size, int age)
+ {
+ m_cache.setTargetSize (size);
+ m_cache.setTargetAge (age);
+ }
+
+ void sweep ()
+ {
+ m_cache.sweep ();
+ m_negativeCache.sweep ();
+ }
+
+ int getWriteLoad ()
+ {
+ return m_backend->getWriteLoad ();
+ }
+
+ //------------------------------------------------------------------------------
+
+ void visitAll (Backend::VisitCallback& callback)
+ {
+ m_backend->visitAll (callback);
+ }
+
+ void import (NodeStore& sourceDatabase)
+ {
+ class ImportVisitCallback : public Backend::VisitCallback
+ {
+ public:
+ explicit ImportVisitCallback (Backend& backend)
+ : m_backend (backend)
+ {
+ m_objects.reserve (batchWritePreallocationSize);
+ }
+
+ ~ImportVisitCallback ()
+ {
+ if (! m_objects.empty ())
+ m_backend.storeBatch (m_objects);
+ }
+
+ void visitObject (NodeObject::Ptr const& object)
+ {
+ if (m_objects.size () >= batchWritePreallocationSize)
+ {
+ m_backend.storeBatch (m_objects);
+
+ m_objects.clear ();
+ m_objects.reserve (batchWritePreallocationSize);
+ }
+
+ m_objects.push_back (object);
+ }
+
+ private:
+ Backend& m_backend;
+ Batch m_objects;
+ };
+
+ //--------------------------------------------------------------------------
+
+ ImportVisitCallback callback (*m_backend);
+
+ sourceDatabase.visitAll (callback);
+ }
+
+ //------------------------------------------------------------------------------
+
+ static NodeStore::Backend* createBackend (
+ Parameters const& parameters, Scheduler& scheduler = getSynchronousScheduler ())
+ {
+ Backend* backend = nullptr;
+
+ String const& type = parameters ["type"];
+
+ if (type.isNotEmpty ())
+ {
+ BackendFactory* factory = nullptr;
+
+ for (int i = 0; i < s_factories.size (); ++i)
+ {
+ if (s_factories [i]->getName ().compareIgnoreCase (type) == 0)
+ {
+ factory = s_factories [i];
+ break;
+ }
+ }
+
+ if (factory != nullptr)
+ {
+ backend = factory->createInstance (NodeObject::keyBytes, parameters, scheduler);
+ }
+ else
+ {
+ Throw (std::runtime_error ("unknown backend type"));
+ }
+ }
+ else
+ {
+ Throw (std::runtime_error ("missing backend type"));
+ }
+
+ return backend;
+ }
+
+ static void addBackendFactory (BackendFactory& factory)
+ {
+ s_factories.add (&factory);
+ }
+
+ //------------------------------------------------------------------------------
+
+private:
+ static Array s_factories;
+
+ Scheduler& m_scheduler;
+
+ // Persistent key/value storage.
+ ScopedPointer m_backend;
+
+ // Larger key/value storage, but not necessarily persistent.
+ ScopedPointer m_fastBackend;
+
+ // VFALCO NOTE What are these things for? We need comments.
+ TaggedCache m_cache;
+ KeyCache m_negativeCache;
+};
+
+Array NodeStoreImp::s_factories;
+
+//------------------------------------------------------------------------------
+
+void NodeStore::addBackendFactory (BackendFactory& factory)
+{
+ NodeStoreImp::addBackendFactory (factory);
}
+
+NodeStore::Scheduler& NodeStore::getSynchronousScheduler ()
+{
+ // Simple scheduler that performs the task immediately
+ struct SynchronousScheduler : Scheduler
+ {
+ void scheduleTask (Task* task)
+ {
+ task->performScheduledTask ();
+ }
+ };
+
+ static SynchronousScheduler scheduler;
+
+ return scheduler;
+}
+
+NodeStore* NodeStore::New (Parameters const& backendParameters,
+ Parameters fastBackendParameters,
+ Scheduler& scheduler)
+{
+ return new NodeStoreImp (backendParameters,
+ fastBackendParameters,
+ scheduler);
+}
+
+//==============================================================================
+
+// Some common code for the unit tests
+//
+class NodeStoreUnitTest : public UnitTest
+{
+public:
+ // Tunable parameters
+ //
+ enum
+ {
+ maxPayloadBytes = 1000,
+ numObjectsToTest = 1000
+ };
+
+ // Shorthand type names
+ //
+ typedef NodeStore::Backend Backend;
+ typedef NodeStore::Batch Batch;
+
+ // Creates predictable objects
+ class PredictableObjectFactory
+ {
+ public:
+ explicit PredictableObjectFactory (int64 seedValue)
+ : m_seedValue (seedValue)
+ {
+ }
+
+ NodeObject::Ptr createObject (int index)
+ {
+ Random r (m_seedValue + index);
+
+ NodeObjectType type;
+ switch (r.nextInt (4))
+ {
+ case 0: type = hotLEDGER; break;
+ case 1: type = hotTRANSACTION; break;
+ case 2: type = hotACCOUNT_NODE; break;
+ case 3: type = hotTRANSACTION_NODE; break;
+ default:
+ type = hotUNKNOWN;
+ break;
+ };
+
+ LedgerIndex ledgerIndex = 1 + r.nextInt (1024 * 1024);
+
+ uint256 hash;
+ r.nextBlob (hash.begin (), hash.size ());
+
+ int const payloadBytes = 1 + r.nextInt (maxPayloadBytes);
+
+ Blob data (payloadBytes);
+
+ r.nextBlob (data.data (), payloadBytes);
+
+ return NodeObject::createObject (type, ledgerIndex, data, hash);
+ }
+
+ private:
+ int64 const m_seedValue;
+ };
+
+public:
+ NodeStoreUnitTest (String name, UnitTest::When when = UnitTest::runAlways)
+ : UnitTest (name, "ripple", when)
+ {
+ }
+
+ // Create a predictable batch of objects
+ static void createPredictableBatch (Batch& batch, int startingIndex, int numObjects, int64 seedValue)
+ {
+ batch.reserve (numObjects);
+
+ PredictableObjectFactory factory (seedValue);
+
+ for (int i = 0; i < numObjects; ++i)
+ batch.push_back (factory.createObject (startingIndex + i));
+ }
+
+ // Compare two batches for equality
+ static bool areBatchesEqual (Batch const& lhs, Batch const& rhs)
+ {
+ bool result = true;
+
+ if (lhs.size () == rhs.size ())
+ {
+ for (int i = 0; i < lhs.size (); ++i)
+ {
+ if (! lhs [i]->isCloneOf (rhs [i]))
+ {
+ result = false;
+ break;
+ }
+ }
+ }
+ else
+ {
+ result = false;
+ }
+
+ return result;
+ }
+
+ // Store a batch in a backend
+ void storeBatch (Backend& backend, Batch const& batch)
+ {
+ for (int i = 0; i < batch.size (); ++i)
+ {
+ backend.store (batch [i]);
+ }
+ }
+
+ // Get a copy of a batch in a backend
+ void fetchCopyOfBatch (Backend& backend, Batch* pCopy, Batch const& batch)
+ {
+ pCopy->clear ();
+ pCopy->reserve (batch.size ());
+
+ for (int i = 0; i < batch.size (); ++i)
+ {
+ NodeObject::Ptr object;
+
+ Backend::Status const status = backend.fetch (
+ batch [i]->getHash ().cbegin (), &object);
+
+ expect (status == Backend::ok, "Should be ok");
+
+ if (status == Backend::ok)
+ {
+ expect (object != nullptr, "Should not be null");
+
+ pCopy->push_back (object);
+ }
+ }
+ }
+
+ // Store all objects in a batch
+ static void storeBatch (NodeStore& db, NodeStore::Batch const& batch)
+ {
+ for (int i = 0; i < batch.size (); ++i)
+ {
+ NodeObject::Ptr const object (batch [i]);
+
+ Blob data (object->getData ());
+
+ db.store (object->getType (),
+ object->getIndex (),
+ data,
+ object->getHash ());
+ }
+ }
+
+ // Fetch all the hashes in one batch, into another batch.
+ static void fetchCopyOfBatch (NodeStore& db,
+ NodeStore::Batch* pCopy,
+ NodeStore::Batch const& batch)
+ {
+ pCopy->clear ();
+ pCopy->reserve (batch.size ());
+
+ for (int i = 0; i < batch.size (); ++i)
+ {
+ NodeObject::Ptr object = db.fetch (batch [i]->getHash ());
+
+ if (object != nullptr)
+ pCopy->push_back (object);
+ }
+ }
+};
+
+//------------------------------------------------------------------------------
+
+// Tests predictable batches, and NodeObject blob encoding
+//
+class NodeStoreBasicsTests : public NodeStoreUnitTest
+{
+public:
+ typedef NodeStore::EncodedBlob EncodedBlob;
+ typedef NodeStore::DecodedBlob DecodedBlob;
+
+ NodeStoreBasicsTests () : NodeStoreUnitTest ("NodeStoreBasics")
+ {
+ }
+
+ // Make sure predictable object generation works!
+ void testBatches (int64 const seedValue)
+ {
+ beginTest ("batch");
+
+ Batch batch1;
+ createPredictableBatch (batch1, 0, numObjectsToTest, seedValue);
+
+ Batch batch2;
+ createPredictableBatch (batch2, 0, numObjectsToTest, seedValue);
+
+ expect (areBatchesEqual (batch1, batch2), "Should be equal");
+
+ Batch batch3;
+ createPredictableBatch (batch3, 1, numObjectsToTest, seedValue);
+
+ expect (! areBatchesEqual (batch1, batch3), "Should not be equal");
+ }
+
+ // Checks encoding/decoding blobs
+ void testBlobs (int64 const seedValue)
+ {
+ beginTest ("encoding");
+
+ Batch batch;
+ createPredictableBatch (batch, 0, numObjectsToTest, seedValue);
+
+ EncodedBlob encoded;
+ for (int i = 0; i < batch.size (); ++i)
+ {
+ encoded.prepare (batch [i]);
+
+ DecodedBlob decoded (encoded.getKey (), encoded.getData (), encoded.getSize ());
+
+ expect (decoded.wasOk (), "Should be ok");
+
+ if (decoded.wasOk ())
+ {
+ NodeObject::Ptr const object (decoded.createObject ());
+
+ expect (batch [i]->isCloneOf (object), "Should be clones");
+ }
+ }
+ }
+
+ void runTest ()
+ {
+ int64 const seedValue = 50;
+
+ testBatches (seedValue);
+
+ testBlobs (seedValue);
+ }
+};
+
+static NodeStoreBasicsTests nodeStoreBasicsTests;
+
+//------------------------------------------------------------------------------
+
+// Tests the NodeStore::Backend interface
+//
+class NodeStoreBackendTests : public NodeStoreUnitTest
+{
+public:
+ NodeStoreBackendTests () : NodeStoreUnitTest ("NodeStoreBackend")
+ {
+ }
+
+ //--------------------------------------------------------------------------
+
+ void testBackend (String type, int64 const seedValue)
+ {
+ beginTest (String ("NodeStore::Backend type=") + type);
+
+ StringPairArray params;
+ File const path (File::createTempFile ("node_db"));
+ params.set ("type", type);
+ params.set ("path", path.getFullPathName ());
+
+ // Create a batch
+ NodeStore::Batch batch;
+ createPredictableBatch (batch, 0, numObjectsToTest, seedValue);
+
+ {
+ // Open the backend
+ ScopedPointer <Backend> backend (NodeStoreImp::createBackend (params));
+
+ // Write the batch
+ storeBatch (*backend, batch);
+
+ {
+ // Read it back in
+ NodeStore::Batch copy;
+ fetchCopyOfBatch (*backend, &copy, batch);
+ expect (areBatchesEqual (batch, copy), "Should be equal");
+ }
+
+ {
+ // Reorder and read the copy again
+ NodeStore::Batch copy;
+ UnitTestUtilities::repeatableShuffle (batch.size (), batch, seedValue);
+ fetchCopyOfBatch (*backend, &copy, batch);
+ expect (areBatchesEqual (batch, copy), "Should be equal");
+ }
+ }
+
+ {
+ // Re-open the backend
+ ScopedPointer <Backend> backend (NodeStoreImp::createBackend (params));
+
+ // Read it back in
+ NodeStore::Batch copy;
+ fetchCopyOfBatch (*backend, &copy, batch);
+ // Canonicalize the source and destination batches
+ std::sort (batch.begin (), batch.end (), NodeObject::LessThan ());
+ std::sort (copy.begin (), copy.end (), NodeObject::LessThan ());
+ expect (areBatchesEqual (batch, copy), "Should be equal");
+ }
+ }
+
+ //--------------------------------------------------------------------------
+
+ void runTest ()
+ {
+ int const seedValue = 50;
+
+ testBackend ("keyvadb", seedValue);
+
+ testBackend ("leveldb", seedValue);
+
+ testBackend ("sqlite", seedValue);
+
+ #if RIPPLE_HYPERLEVELDB_AVAILABLE
+ testBackend ("hyperleveldb", seedValue);
+ #endif
+
+ #if RIPPLE_MDB_AVAILABLE
+ testBackend ("mdb", seedValue);
+ #endif
+ }
+};
+
+static NodeStoreBackendTests nodeStoreBackendTests;
+
+//------------------------------------------------------------------------------
+
+class NodeStoreTimingTests : public NodeStoreUnitTest
+{
+public:
+ enum
+ {
+ numObjectsToTest = 20000
+ };
+
+ NodeStoreTimingTests ()
+ : NodeStoreUnitTest ("NodeStoreTiming", UnitTest::runManual)
+ {
+ }
+
+ class Stopwatch
+ {
+ public:
+ Stopwatch ()
+ {
+ }
+
+ void start ()
+ {
+ m_startTime = Time::getHighResolutionTicks ();
+ }
+
+ double getElapsed ()
+ {
+ int64 const now = Time::getHighResolutionTicks();
+
+ return Time::highResolutionTicksToSeconds (now - m_startTime);
+ }
+
+ private:
+ int64 m_startTime;
+ };
+
+ //--------------------------------------------------------------------------
+
+ void testBackend (String type, int64 const seedValue)
+ {
+ String s;
+ s << "Testing backend '" << type << "' performance";
+ beginTest (s);
+
+ StringPairArray params;
+ File const path (File::createTempFile ("node_db"));
+ params.set ("type", type);
+ params.set ("path", path.getFullPathName ());
+
+ // Create batches
+ NodeStore::Batch batch1;
+ createPredictableBatch (batch1, 0, numObjectsToTest, seedValue);
+ NodeStore::Batch batch2;
+ createPredictableBatch (batch2, 0, numObjectsToTest, seedValue);
+
+ // Open the backend
+ ScopedPointer <Backend> backend (NodeStoreImp::createBackend (params));
+
+ Stopwatch t;
+
+ // Individual write batch test
+ t.start ();
+ storeBatch (*backend, batch1);
+ s = "";
+ s << " Single write: " << String (t.getElapsed (), 2) << " seconds";
+ logMessage (s);
+
+ // Bulk write batch test
+ t.start ();
+ backend->storeBatch (batch2);
+ s = "";
+ s << " Batch write: " << String (t.getElapsed (), 2) << " seconds";
+ logMessage (s);
+
+ // Read test
+ Batch copy;
+ t.start ();
+ fetchCopyOfBatch (*backend, &copy, batch1);
+ fetchCopyOfBatch (*backend, &copy, batch2);
+ s = "";
+ s << " Batch read: " << String (t.getElapsed (), 2) << " seconds";
+ logMessage (s);
+ }
+
+ //--------------------------------------------------------------------------
+
+ void runTest ()
+ {
+ int const seedValue = 50;
+
+ testBackend ("keyvadb", seedValue);
+
+ testBackend ("leveldb", seedValue);
+
+ #if RIPPLE_HYPERLEVELDB_AVAILABLE
+ testBackend ("hyperleveldb", seedValue);
+ #endif
+
+ #if RIPPLE_MDB_AVAILABLE
+ testBackend ("mdb", seedValue);
+ #endif
+
+ testBackend ("sqlite", seedValue);
+ }
+};
+
+static NodeStoreTimingTests nodeStoreTimingTests;
+
+//------------------------------------------------------------------------------
+
+class NodeStoreTests : public NodeStoreUnitTest
+{
+public:
+ NodeStoreTests () : NodeStoreUnitTest ("NodeStore")
+ {
+ }
+
+ void testImport (String destBackendType, String srcBackendType, int64 seedValue)
+ {
+ File const node_db (File::createTempFile ("node_db"));
+ StringPairArray srcParams;
+ srcParams.set ("type", srcBackendType);
+ srcParams.set ("path", node_db.getFullPathName ());
+
+ // Create a batch
+ NodeStore::Batch batch;
+ createPredictableBatch (batch, 0, numObjectsToTest, seedValue);
+
+ // Write to source db
+ {
+ ScopedPointer <NodeStore> src (NodeStore::New (srcParams));
+
+ storeBatch (*src, batch);
+ }
+
+ NodeStore::Batch copy;
+
+ {
+ // Re-open the db
+ ScopedPointer <NodeStore> src (NodeStore::New (srcParams));
+
+ // Set up the destination database
+ File const dest_db (File::createTempFile ("dest_db"));
+ StringPairArray destParams;
+ destParams.set ("type", destBackendType);
+ destParams.set ("path", dest_db.getFullPathName ());
+
+ ScopedPointer <NodeStore> dest (NodeStore::New (destParams));
+
+ beginTest (String ("import into '") + destBackendType + "' from '" + srcBackendType + "'");
+
+ // Do the import
+ dest->import (*src);
+
+ // Get the results of the import
+ fetchCopyOfBatch (*dest, &copy, batch);
+ }
+
+ // Canonicalize the source and destination batches
+ std::sort (batch.begin (), batch.end (), NodeObject::LessThan ());
+ std::sort (copy.begin (), copy.end (), NodeObject::LessThan ());
+ expect (areBatchesEqual (batch, copy), "Should be equal");
+
+ }
+
+ //--------------------------------------------------------------------------
+
+ void testNodeStore (String type, bool const useEphemeralDatabase, int64 const seedValue)
+ {
+ String s;
+ s << String ("NodeStore backend '") + type + "'";
+ if (useEphemeralDatabase)
+ s << " (with ephemeral database)";
+
+ beginTest (s);
+
+ File const node_db (File::createTempFile ("node_db"));
+ StringPairArray nodeParams;
+ nodeParams.set ("type", type);
+ nodeParams.set ("path", node_db.getFullPathName ());
+
+ File const temp_db (File::createTempFile ("temp_db"));
+ StringPairArray tempParams;
+ if (useEphemeralDatabase)
+ {
+ tempParams.set ("type", type);
+ tempParams.set ("path", temp_db.getFullPathName ());
+ }
+
+ // Create a batch
+ NodeStore::Batch batch;
+ createPredictableBatch (batch, 0, numObjectsToTest, seedValue);
+
+ {
+ // Open the database
+ ScopedPointer <NodeStore> db (NodeStore::New (nodeParams, tempParams));
+
+ // Write the batch
+ storeBatch (*db, batch);
+
+ {
+ // Read it back in
+ NodeStore::Batch copy;
+ fetchCopyOfBatch (*db, &copy, batch);
+ expect (areBatchesEqual (batch, copy), "Should be equal");
+ }
+
+ {
+ // Reorder and read the copy again
+ NodeStore::Batch copy;
+ UnitTestUtilities::repeatableShuffle (batch.size (), batch, seedValue);
+ fetchCopyOfBatch (*db, &copy, batch);
+ expect (areBatchesEqual (batch, copy), "Should be equal");
+ }
+ }
+
+ {
+ // Re-open the database without the ephemeral DB
+ ScopedPointer <NodeStore> db (NodeStore::New (nodeParams));
+
+ // Read it back in
+ NodeStore::Batch copy;
+ fetchCopyOfBatch (*db, &copy, batch);
+
+ // Canonicalize the source and destination batches
+ std::sort (batch.begin (), batch.end (), NodeObject::LessThan ());
+ std::sort (copy.begin (), copy.end (), NodeObject::LessThan ());
+ expect (areBatchesEqual (batch, copy), "Should be equal");
+ }
+
+ if (useEphemeralDatabase)
+ {
+ // Verify the ephemeral db
+ ScopedPointer <NodeStore> db (NodeStore::New (tempParams, StringPairArray ()));
+
+ // Read it back in
+ NodeStore::Batch copy;
+ fetchCopyOfBatch (*db, &copy, batch);
+
+ // Canonicalize the source and destination batches
+ std::sort (batch.begin (), batch.end (), NodeObject::LessThan ());
+ std::sort (copy.begin (), copy.end (), NodeObject::LessThan ());
+ expect (areBatchesEqual (batch, copy), "Should be equal");
+ }
+ }
+
+ //--------------------------------------------------------------------------
+
+ void runBackendTests (bool useEphemeralDatabase, int64 const seedValue)
+ {
+ testNodeStore ("keyvadb", useEphemeralDatabase, seedValue);
+
+ testNodeStore ("leveldb", useEphemeralDatabase, seedValue);
+
+ testNodeStore ("sqlite", useEphemeralDatabase, seedValue);
+
+ #if RIPPLE_HYPERLEVELDB_AVAILABLE
+ testNodeStore ("hyperleveldb", useEphemeralDatabase, seedValue);
+ #endif
+
+ #if RIPPLE_MDB_AVAILABLE
+ testNodeStore ("mdb", useEphemeralDatabase, seedValue);
+ #endif
+ }
+
+ //--------------------------------------------------------------------------
+
+ void runImportTests (int64 const seedValue)
+ {
+ //testImport ("keyvadb", "keyvadb", seedValue);
+
+ testImport ("leveldb", "leveldb", seedValue);
+
+ #if RIPPLE_HYPERLEVELDB_AVAILABLE
+ testImport ("hyperleveldb", "hyperleveldb", seedValue);
+ #endif
+
+ #if RIPPLE_MDB_AVAILABLE
+ testImport ("mdb", "mdb", seedValue);
+ #endif
+
+ testImport ("sqlite", "sqlite", seedValue);
+ }
+
+ //--------------------------------------------------------------------------
+
+ void runTest ()
+ {
+ int64 const seedValue = 50;
+
+ runBackendTests (false, seedValue);
+
+ runBackendTests (true, seedValue);
+
+ runImportTests (seedValue);
+ }
+};
+
+static NodeStoreTests nodeStoreTests;
diff --git a/modules/ripple_app/node/ripple_NodeStore.h b/modules/ripple_app/node/ripple_NodeStore.h
index dc21f4c2f4..a2c26f72df 100644
--- a/modules/ripple_app/node/ripple_NodeStore.h
+++ b/modules/ripple_app/node/ripple_NodeStore.h
@@ -8,60 +8,282 @@
#define RIPPLE_NODESTORE_H_INCLUDED
/** Persistency layer for NodeObject
+
+ A Node is a ledger object which is uniquely identified by a key, which is
+ the 256-bit hash of the body of the node. The payload is a variable length
+ block of serialized data.
+
+ All ledger data is stored as node objects and as such, needs to be persisted
+ between launches. Furthermore, since the set of node objects will in
+ general be larger than the amount of available memory, purged node objects
+ which are later accessed must be retrieved from the node store.
+
+ @see NodeObject
*/
-class NodeStore : LeakChecked
+class NodeStore
{
public:
- /** Back end used for the store.
+ enum
+ {
+ // This is only used to pre-allocate the array for
+ // batch objects and does not affect the amount written.
+ //
+ batchWritePreallocationSize = 128
+ };
+
+ typedef std::vector <NodeObject::Ptr> Batch;
+
+ typedef StringPairArray Parameters;
+
+ //--------------------------------------------------------------------------
+
+ /** Parsed key/value blob into NodeObject components.
+
+ This will extract the information required to construct a NodeObject. It
+ also does consistency checking and returns the result, so it is possible
+ to determine if the data is corrupted without throwing an exception. Not
+ all forms of corruption are detected so further analysis will be needed
+ to eliminate false negatives.
+
+ @note This defines the database format of a NodeObject!
+ */
+ class DecodedBlob
+ {
+ public:
+ /** Construct the decoded blob from raw data. */
+ DecodedBlob (void const* key, void const* value, int valueBytes);
+
+ /** Determine if the decoding was successful. */
+ bool wasOk () const noexcept { return m_success; }
+
+ /** Create a NodeObject from this data. */
+ NodeObject::Ptr createObject ();
+
+ private:
+ bool m_success;
+
+ void const* m_key;
+ LedgerIndex m_ledgerIndex;
+ NodeObjectType m_objectType;
+ unsigned char const* m_objectData;
+ int m_dataBytes;
+ };
+
+ //--------------------------------------------------------------------------
+
+ /** Utility for producing flattened node objects.
+
+ These get recycled to prevent many small allocations.
+
+ @note This defines the database format of a NodeObject!
+ */
+ struct EncodedBlob
+ {
+ typedef RecycledObjectPool <EncodedBlob> Pool;
+
+ void prepare (NodeObject::Ptr const& object);
+
+ void const* getKey () const noexcept { return m_key; }
+
+ size_t getSize () const noexcept { return m_size; }
+
+ void const* getData () const noexcept { return m_data.getData (); }
+
+ private:
+ void const* m_key;
+ MemoryBlock m_data;
+ size_t m_size;
+ };
+
+ //--------------------------------------------------------------------------
+
+ /** Provides optional asynchronous scheduling for backends.
+
+ For improved performance, a backend has the option of performing writes
+ in batches. These writes can be scheduled using the provided scheduler
+ object.
+
+ @see BatchWriter
+ */
+ class Scheduler
+ {
+ public:
+ /** Derived classes perform scheduled tasks. */
+ struct Task
+ {
+ virtual ~Task () { }
+
+ /** Performs the task.
+
+ The call may take place on a foreign thread.
+ */
+ virtual void performScheduledTask () = 0;
+ };
+
+ /** Schedules a task.
+
+ Depending on the implementation, this could happen
+ immediately or get deferred.
+ */
+ virtual void scheduleTask (Task* task) = 0;
+ };
+
+ //--------------------------------------------------------------------------
+
+ /** Helps with batch writing.
+
+ The batch writes are performed with a scheduled task. Use of the
+ class is not required. A backend can implement its own write batching,
+ or skip write batching if doing so yields a performance benefit.
+
+ @see Scheduler
+ */
+ // VFALCO NOTE I'm not entirely happy having placed this here,
+ // because whoever needs to use NodeStore certainly doesn't
+ // need to see the implementation details of BatchWriter.
+ //
+ class BatchWriter : private Scheduler::Task
+ {
+ public:
+ /** This callback does the actual writing. */
+ struct Callback
+ {
+ virtual void writeBatch (Batch const& batch) = 0;
+ };
+
+ /** Create a batch writer. */
+ BatchWriter (Callback& callback, Scheduler& scheduler);
+
+ /** Destroy a batch writer.
+
+ Anything pending in the batch is written out before this returns.
+ */
+ ~BatchWriter ();
+
+ /** Store the object.
+
+ This will add to the batch and initiate a scheduled task to
+ write the batch out.
+ */
+ void store (NodeObject::Ptr const& object);
+
+ /** Get an estimate of the amount of writing I/O pending. */
+ int getWriteLoad ();
+
+ private:
+ void performScheduledTask ();
+ void writeBatch ();
+ void waitForWriting ();
+
+ private:
+ typedef boost::recursive_mutex LockType;
+ typedef boost::condition_variable_any CondvarType;
+
+ Callback& m_callback;
+ Scheduler& m_scheduler;
+ LockType mWriteMutex;
+ CondvarType mWriteCondition;
+ int mWriteGeneration;
+ int mWriteLoad;
+ bool mWritePending;
+ Batch mWriteSet;
+ };
+
+ //--------------------------------------------------------------------------
+
+ /** A backend used for the store.
+
+ The NodeStore uses a swappable backend so that other database systems
+ can be tried. Different databases may offer various features such
+ as improved performance, fault tolerant or distributed storage, or
+ all in-memory operation.
+
+ A given instance of a backend is fixed to a particular key size.
*/
class Backend
{
public:
- // VFALCO TODO Move the function definition to the .cpp
- Backend ()
- : mWriteGeneration(0)
- , mWriteLoad(0)
- , mWritePending(false)
+ /** Return codes from operations. */
+ enum Status
{
- mWriteSet.reserve(128);
- }
+ ok,
+ notFound,
+ dataCorrupt,
+ unknown
+ };
+ /** Destroy the backend.
+
+ All open files are closed and flushed. If there are batched writes
+ or other tasks scheduled, they will be completed before this call
+ returns.
+ */
virtual ~Backend () { }
- virtual std::string getDataBaseName() = 0;
+ /** Get the human-readable name of this backend.
- // Store/retrieve a single object
- // These functions must be thread safe
- virtual bool store (NodeObject::ref);
- virtual NodeObject::pointer retrieve (uint256 const &hash) = 0;
+ This is used for diagnostic output.
+ */
+ virtual std::string getName() = 0;
- // Store a group of objects
- // This function will only be called from a single thread
- virtual bool bulkStore (const std::vector< NodeObject::pointer >&) = 0;
+ /** Fetch a single object.
- // Visit every object in the database
- // This function will only be called during an import operation
- //
- // VFALCO TODO Replace FUNCTION_TYPE with a beast lift.
- //
- virtual void visitAll (FUNCTION_TYPE <void (NodeObject::pointer)>) = 0;
+ If the object is not found or an error is encountered, the
+ result will indicate the condition.
- // VFALCO TODO Put this bulk writing logic into a separate class.
- virtual void bulkWrite (Job &);
- virtual void waitWrite ();
- virtual int getWriteLoad ();
+ @note This will be called concurrently.
- protected:
- // VFALCO TODO Put this bulk writing logic into a separate class.
- boost::mutex mWriteMutex;
- boost::condition_variable mWriteCondition;
- int mWriteGeneration;
- int mWriteLoad;
- bool mWritePending;
- std::vector< boost::shared_ptr<NodeObject> > mWriteSet;
+ @param key A pointer to the key data.
+ @param pObject [out] The created object if successful.
+
+ @return The result of the operation.
+ */
+ virtual Status fetch (void const* key, NodeObject::Ptr* pObject) = 0;
+
+ /** Store a single object.
+
+ Depending on the implementation this may happen immediately
+ or deferred using a scheduled task.
+
+ @note This will be called concurrently.
+
+ @param object The object to store.
+ */
+ virtual void store (NodeObject::Ptr const& object) = 0;
+
+ /** Store a group of objects.
+
+ @note This function will not be called concurrently with
+ itself or @ref store.
+ */
+ virtual void storeBatch (Batch const& batch) = 0;
+
+ /** Callback for iterating through objects.
+
+ @see visitAll
+ */
+ struct VisitCallback
+ {
+ virtual void visitObject (NodeObject::Ptr const& object) = 0;
+ };
+
+ /** Visit every object in the database
+
+ This is usually called during import.
+
+ @note This routine will not be called concurrently with itself
+ or other methods.
+
+ @see import, VisitCallback
+ */
+ virtual void visitAll (VisitCallback& callback) = 0;
+
+ /** Estimate the number of write operations pending. */
+ virtual int getWriteLoad () = 0;
};
-public:
+ //--------------------------------------------------------------------------
+
/** Factory to produce backends.
*/
class BackendFactory
@@ -69,67 +291,142 @@ public:
public:
virtual ~BackendFactory () { }
- /** Retrieve the name of this factory.
- */
+ /** Retrieve the name of this factory. */
virtual String getName () const = 0;
/** Create an instance of this factory's backend.
+
+ @param keyBytes The fixed number of bytes per key.
+ @param keyValues A set of key/value configuration pairs.
+ @param scheduler The scheduler to use for running tasks.
+
+ @return A pointer to the Backend object.
*/
- virtual Backend* createInstance (StringPairArray const& keyValues) = 0;
+ virtual Backend* createInstance (size_t keyBytes,
+ Parameters const& parameters,
+ Scheduler& scheduler) = 0;
};
-public:
+ //--------------------------------------------------------------------------
+
/** Construct a node store.
- parameters has the format:
+ The parameters are key value pairs passed to the backend. The
+ 'type' key must exist, it defines the choice of backend. Most
+ backends also require a 'path' field.
+
+ Some choices for 'type' are:
+ HyperLevelDB, LevelDB, SQLite, KeyvaDB, MDB
- <key>=<value>['|'<key>=<value>]
+ If the fastBackendParameter is omitted or empty, no ephemeral database
+ is used. If the scheduler parameter is omitted or unspecified, a
+ synchronous scheduler is used which performs all tasks immediately on
+ the caller's thread.
- The key "type" must exist, it defines the backend. For example
- "type=LevelDB|path=/mnt/ephemeral"
+ @note If the database cannot be opened or created, an exception is thrown.
+
+ @param backendParameters The parameter string for the persistent backend.
+ @param fastBackendParameters [optional] The parameter string for the ephemeral backend.
+ @param scheduler [optional] The scheduler to use for performing asynchronous tasks.
+
+ @return The opened database.
*/
- // VFALCO NOTE Is cacheSize in bytes? objects? KB?
- // Is cacheAge in minutes? seconds?
- //
- NodeStore (String backendParameters,
- String fastBackendParameters,
- int cacheSize,
- int cacheAge);
+ static NodeStore* New (Parameters const& backendParameters,
+ Parameters fastBackendParameters = Parameters (),
+ Scheduler& scheduler = getSynchronousScheduler ());
+
+ /** Get the synchronous scheduler.
+
+ The synchronous scheduler performs all tasks immediately, before
+ returning to the caller, using the caller's thread.
+ */
+ static Scheduler& getSynchronousScheduler ();
+
+ /** Destroy the node store.
+
+ All pending operations are completed, pending writes flushed,
+ and files closed before this returns.
+ */
+ virtual ~NodeStore () { }
+
+ /** Retrieve the name associated with this backend.
+
+ This is used for diagnostics and may not reflect the actual path
+ or paths used by the underlying backend.
+ */
+ virtual String getName () const = 0;
/** Add the specified backend factory to the list of available factories.
The names of available factories are compared against the "type"
value in the parameter list on construction.
+
+ @param factory The factory to add.
*/
static void addBackendFactory (BackendFactory& factory);
- float getCacheHitRate ();
+ /** Fetch an object.
- bool store (NodeObjectType type, uint32 index, Blob const& data,
- uint256 const& hash);
+ If the object is known to be not in the database, isn't found in the
+ database during the fetch, or failed to load correctly during the fetch,
+ `nullptr` is returned.
- NodeObject::pointer retrieve (uint256 const& hash);
+ @note This can be called concurrently.
- void waitWrite ();
- void tune (int size, int age);
- void sweep ();
- int getWriteLoad ();
+ @param hash The key of the object to retrieve.
- int import (String sourceBackendParameters);
+ @return The object, or nullptr if it couldn't be retrieved.
+ */
+ virtual NodeObject::pointer fetch (uint256 const& hash) = 0;
-private:
- void importVisitor (std::vector & objects, NodeObject::pointer object);
-
- static Backend* createBackend (String const& parameters);
+ /** Store the object.
- static Array <BackendFactory*> s_factories;
+ The caller's Blob parameter is overwritten.
-private:
- ScopedPointer <Backend> m_backend;
- ScopedPointer <Backend> m_fastBackend;
+ @param type The type of object.
+ @param ledgerIndex The ledger in which the object appears.
+ @param data The payload of the object. The caller's
+ variable is overwritten.
+ @param hash The 256-bit hash of the payload data.
+
+ @return `true` if the object was stored?
+ */
+ virtual void store (NodeObjectType type,
+ uint32 ledgerIndex,
+ Blob& data,
+ uint256 const& hash) = 0;
+
+ /** Visit every object in the database
+
+ This is usually called during import.
+
+ @note This routine will not be called concurrently with itself
+ or other methods.
+
+ @see import
+ */
+ virtual void visitAll (Backend::VisitCallback& callback) = 0;
+
+ /** Import objects from another database. */
+ virtual void import (NodeStore& sourceDatabase) = 0;
+
+
+ /** Retrieve the estimated number of pending write operations.
+
+ This is used for diagnostics.
+ */
+ virtual int getWriteLoad () = 0;
+
+ // VFALCO TODO Document this.
+ virtual float getCacheHitRate () = 0;
+
+ // VFALCO TODO Document this.
+ // TODO Document the parameter meanings.
+ virtual void tune (int size, int age) = 0;
+
+ // VFALCO TODO Document this.
+ virtual void sweep () = 0;
- TaggedCache mCache;
- KeyCache mNegativeCache;
};
#endif
diff --git a/modules/ripple_app/node/ripple_NullBackendFactory.cpp b/modules/ripple_app/node/ripple_NullBackendFactory.cpp
index 6ffb0d8299..6a3b000c75 100644
--- a/modules/ripple_app/node/ripple_NullBackendFactory.cpp
+++ b/modules/ripple_app/node/ripple_NullBackendFactory.cpp
@@ -15,28 +15,31 @@ public:
{
}
- std::string getDataBaseName()
+ std::string getName()
{
return std::string ();
}
- bool store (NodeObject::ref obj)
+ Status fetch (void const*, NodeObject::Ptr*)
+ {
+ return notFound;
+ }
+
+ void store (NodeObject::ref object)
+ {
+ }
+
+ void storeBatch (NodeStore::Batch const& batch)
{
- return false;
}
- bool bulkStore (const std::vector< NodeObject::pointer >& objs)
+ void visitAll (VisitCallback& callback)
{
- return false;
}
- NodeObject::pointer retrieve (uint256 const& hash)
- {
- return NodeObject::pointer ();
- }
-
- void visitAll (FUNCTION_TYPE func)
+ int getWriteLoad ()
{
+ return 0;
}
};
@@ -62,7 +65,10 @@ String NullBackendFactory::getName () const
return "none";
}
-NodeStore::Backend* NullBackendFactory::createInstance (StringPairArray const& keyValues)
+NodeStore::Backend* NullBackendFactory::createInstance (
+ size_t,
+ StringPairArray const&,
+ NodeStore::Scheduler&)
{
return new NullBackendFactory::Backend;
}
diff --git a/modules/ripple_app/node/ripple_NullBackendFactory.h b/modules/ripple_app/node/ripple_NullBackendFactory.h
index 7112473384..a68c1838ea 100644
--- a/modules/ripple_app/node/ripple_NullBackendFactory.h
+++ b/modules/ripple_app/node/ripple_NullBackendFactory.h
@@ -23,7 +23,10 @@ public:
static NullBackendFactory& getInstance ();
String getName () const;
- NodeStore::Backend* createInstance (StringPairArray const& keyValues);
+
+ NodeStore::Backend* createInstance (size_t keyBytes,
+ StringPairArray const& keyValues,
+ NodeStore::Scheduler& scheduler);
};
#endif
diff --git a/modules/ripple_app/node/ripple_SqliteBackendFactory.cpp b/modules/ripple_app/node/ripple_SqliteBackendFactory.cpp
index 0b421ac5be..7b4a7a9dc4 100644
--- a/modules/ripple_app/node/ripple_SqliteBackendFactory.cpp
+++ b/modules/ripple_app/node/ripple_SqliteBackendFactory.cpp
@@ -4,97 +4,177 @@
*/
//==============================================================================
+static const char* s_nodeStoreDBInit [] =
+{
+ "PRAGMA synchronous=NORMAL;",
+ "PRAGMA journal_mode=WAL;",
+ "PRAGMA journal_size_limit=1582080;",
+
+#if (ULONG_MAX > UINT_MAX) && !defined (NO_SQLITE_MMAP)
+ "PRAGMA mmap_size=171798691840;",
+#endif
+
+ "BEGIN TRANSACTION;",
+
+ "CREATE TABLE CommittedObjects ( \
+ Hash CHARACTER(64) PRIMARY KEY, \
+ ObjType CHAR(1) NOT NULL, \
+ LedgerIndex BIGINT UNSIGNED, \
+ Object BLOB \
+ );",
+
+ "END TRANSACTION;"
+};
+
+static int s_nodeStoreDBCount = NUMBER (s_nodeStoreDBInit);
+
+//------------------------------------------------------------------------------
+
class SqliteBackendFactory::Backend : public NodeStore::Backend
{
public:
- Backend(std::string const& path) : mName(path)
+ Backend (size_t keyBytes, std::string const& path)
+ : m_keyBytes (keyBytes)
+ , m_name (path)
+ , m_db (new DatabaseCon(path, s_nodeStoreDBInit, s_nodeStoreDBCount))
{
- mDb = new DatabaseCon(path, HashNodeDBInit, HashNodeDBCount);
- mDb->getDB()->executeSQL(boost::str(boost::format("PRAGMA cache_size=-%d;") %
- (theConfig.getSize(siHashNodeDBCache) * 1024)));
+ String s;
+
+ // VFALCO TODO Remove this dependency on theConfig
+ //
+ s << "PRAGMA cache_size=-" << String (theConfig.getSize(siHashNodeDBCache) * 1024);
+ m_db->getDB()->executeSQL (s.toStdString ().c_str ());
}
- Backend()
+ ~Backend()
{
- delete mDb;
}
- std::string getDataBaseName()
+ std::string getName()
{
- return mName;
+ return m_name;
}
- bool bulkStore(const std::vector< NodeObject::pointer >& objects)
+ //--------------------------------------------------------------------------
+
+ Status fetch (void const* key, NodeObject::Ptr* pObject)
{
- ScopedLock sl(mDb->getDBLock());
- static SqliteStatement pStB(mDb->getDB()->getSqliteDB(), "BEGIN TRANSACTION;");
- static SqliteStatement pStE(mDb->getDB()->getSqliteDB(), "END TRANSACTION;");
- static SqliteStatement pSt(mDb->getDB()->getSqliteDB(),
+ Status result = ok;
+
+ pObject->reset ();
+
+ {
+ ScopedLock sl (m_db->getDBLock());
+
+ uint256 const hash (key);
+
+ static SqliteStatement pSt (m_db->getDB()->getSqliteDB(),
+ "SELECT ObjType,LedgerIndex,Object FROM CommittedObjects WHERE Hash = ?;");
+
+ pSt.bind (1, hash.GetHex());
+
+ if (pSt.isRow (pSt.step()))
+ {
+ // VFALCO NOTE This is unfortunately needed,
+ // the DatabaseCon creates the blob?
+ Blob data (pSt.getBlob (2));
+ *pObject = NodeObject::createObject (
+ getTypeFromString (pSt.peekString (0)),
+ pSt.getUInt32 (1),
+ data,
+ hash);
+ }
+ else
+ {
+ result = notFound;
+ }
+
+ pSt.reset();
+ }
+
+ return result;
+ }
+
+ void store (NodeObject::ref object)
+ {
+ NodeStore::Batch batch;
+
+ batch.push_back (object);
+
+ storeBatch (batch);
+ }
+
+ void storeBatch (NodeStore::Batch const& batch)
+ {
+ // VFALCO TODO Rewrite this to use Beast::db
+
+ ScopedLock sl (m_db->getDBLock());
+
+ static SqliteStatement pStB (m_db->getDB()->getSqliteDB(), "BEGIN TRANSACTION;");
+ static SqliteStatement pStE (m_db->getDB()->getSqliteDB(), "END TRANSACTION;");
+ static SqliteStatement pSt (m_db->getDB()->getSqliteDB(),
"INSERT OR IGNORE INTO CommittedObjects "
"(Hash,ObjType,LedgerIndex,Object) VALUES (?, ?, ?, ?);");
pStB.step();
pStB.reset();
- BOOST_FOREACH(NodeObject::ref object, objects)
+ BOOST_FOREACH (NodeObject::Ptr const& object, batch)
{
- bind(pSt, object);
+ doBind (pSt, object);
+
pSt.step();
pSt.reset();
}
pStE.step();
pStE.reset();
-
- return true;
-
}
- NodeObject::pointer retrieve(uint256 const& hash)
+ void visitAll (VisitCallback& callback)
{
- NodeObject::pointer ret;
+ // No lock needed as per the visitAll() API
- {
- ScopedLock sl(mDb->getDBLock());
- static SqliteStatement pSt(mDb->getDB()->getSqliteDB(),
- "SELECT ObjType,LedgerIndex,Object FROM CommittedObjects WHERE Hash = ?;");
-
- pSt.bind(1, hash.GetHex());
-
- if (pSt.isRow(pSt.step()))
- ret = boost::make_shared(getType(pSt.peekString(0)), pSt.getUInt32(1), pSt.getBlob(2), hash);
-
- pSt.reset();
- }
-
- return ret;
- }
-
- void visitAll(FUNCTION_TYPE func)
- {
uint256 hash;
- static SqliteStatement pSt(mDb->getDB()->getSqliteDB(),
+ static SqliteStatement pSt(m_db->getDB()->getSqliteDB(),
"SELECT ObjType,LedgerIndex,Object,Hash FROM CommittedObjects;");
- while (pSt.isRow(pSt.step()))
+ while (pSt.isRow (pSt.step()))
{
hash.SetHexExact(pSt.getString(3));
- func(boost::make_shared(getType(pSt.peekString(0)), pSt.getUInt32(1), pSt.getBlob(2), hash));
+
+ // VFALCO NOTE This is unfortunately needed,
+ // the DatabaseCon creates the blob?
+ Blob data (pSt.getBlob (2));
+ NodeObject::Ptr const object (NodeObject::createObject (
+ getTypeFromString (pSt.peekString (0)),
+ pSt.getUInt32 (1),
+ data,
+ hash));
+
+ callback.visitObject (object);
}
- pSt.reset();
+ pSt.reset ();
}
- void bind(SqliteStatement& statement, NodeObject::ref object)
+ int getWriteLoad ()
+ {
+ return 0;
+ }
+
+ //--------------------------------------------------------------------------
+
+ void doBind (SqliteStatement& statement, NodeObject::ref object)
{
char const* type;
switch (object->getType())
{
- case hotLEDGER: type = "L"; break;
+ case hotLEDGER: type = "L"; break;
case hotTRANSACTION: type = "T"; break;
- case hotACCOUNT_NODE: type = "A"; break;
- case hotTRANSACTION_NODE: type = "N"; break;
+ case hotACCOUNT_NODE: type = "A"; break;
+ case hotTRANSACTION_NODE: type = "N"; break;
default: type = "U";
}
@@ -104,25 +184,27 @@ public:
statement.bindStatic(4, object->getData());
}
- NodeObjectType getType(std::string const& type)
+ NodeObjectType getTypeFromString (std::string const& s)
{
- NodeObjectType htype = hotUNKNOWN;
- if (!type.empty())
+ NodeObjectType type = hotUNKNOWN;
+
+ if (!s.empty ())
{
- switch (type[0])
+ switch (s [0])
{
- case 'L': htype = hotLEDGER; break;
- case 'T': htype = hotTRANSACTION; break;
- case 'A': htype = hotACCOUNT_NODE; break;
- case 'N': htype = hotTRANSACTION_NODE; break;
+ case 'L': type = hotLEDGER; break;
+ case 'T': type = hotTRANSACTION; break;
+ case 'A': type = hotACCOUNT_NODE; break;
+ case 'N': type = hotTRANSACTION_NODE; break;
}
}
- return htype;
+ return type;
}
private:
- std::string mName;
- DatabaseCon* mDb;
+ size_t const m_keyBytes;
+ std::string const m_name;
+ ScopedPointer m_db;
};
//------------------------------------------------------------------------------
@@ -147,7 +229,10 @@ String SqliteBackendFactory::getName () const
return "Sqlite";
}
-NodeStore::Backend* SqliteBackendFactory::createInstance (StringPairArray const& keyValues)
+NodeStore::Backend* SqliteBackendFactory::createInstance (
+ size_t keyBytes,
+ StringPairArray const& keyValues,
+ NodeStore::Scheduler& scheduler)
{
- return new Backend (keyValues ["path"].toStdString ());
+ return new Backend (keyBytes, keyValues ["path"].toStdString ());
}
diff --git a/modules/ripple_app/node/ripple_SqliteBackendFactory.h b/modules/ripple_app/node/ripple_SqliteBackendFactory.h
index e6420cbde2..828588fd74 100644
--- a/modules/ripple_app/node/ripple_SqliteBackendFactory.h
+++ b/modules/ripple_app/node/ripple_SqliteBackendFactory.h
@@ -21,7 +21,10 @@ public:
static SqliteBackendFactory& getInstance ();
String getName () const;
- NodeStore::Backend* createInstance (StringPairArray const& keyValues);
+
+ NodeStore::Backend* createInstance (size_t keyBytes,
+ StringPairArray const& keyValues,
+ NodeStore::Scheduler& scheduler);
};
#endif
diff --git a/modules/ripple_app/ripple_app.cpp b/modules/ripple_app/ripple_app.cpp
index afd567e708..ae84625c3b 100644
--- a/modules/ripple_app/ripple_app.cpp
+++ b/modules/ripple_app/ripple_app.cpp
@@ -65,6 +65,8 @@
#include "../ripple_core/ripple_core.h"
+#include "beast/modules/beast_db/beast_db.h"
+
// VFALCO TODO fix these warnings!
#ifdef _MSC_VER
//#pragma warning (push) // Causes spurious C4503 "decorated name exceeds maximum length"
@@ -102,8 +104,9 @@ namespace ripple
#include "node/ripple_NodeObject.h"
#include "node/ripple_NodeStore.h"
-#include "node/ripple_LevelDBBackendFactory.h"
#include "node/ripple_HyperLevelDBBackendFactory.h"
+#include "node/ripple_KeyvaDBBackendFactory.h"
+#include "node/ripple_LevelDBBackendFactory.h"
#include "node/ripple_MdbBackendFactory.h"
#include "node/ripple_NullBackendFactory.h"
#include "node/ripple_SqliteBackendFactory.h"
@@ -154,10 +157,10 @@ namespace ripple
#include "src/cpp/ripple/TransactionMaster.h"
#include "src/cpp/ripple/ripple_LocalCredentials.h"
#include "src/cpp/ripple/WSDoor.h"
+#include "src/cpp/ripple/ripple_Application.h"
#include "src/cpp/ripple/RPCHandler.h"
#include "src/cpp/ripple/TransactionQueue.h"
#include "ledger/OrderBookDB.h"
-#include "src/cpp/ripple/ripple_Application.h"
#include "src/cpp/ripple/CallRPC.h"
#include "src/cpp/ripple/Transactor.h"
#include "src/cpp/ripple/ChangeTransactor.h"
@@ -244,10 +247,11 @@ static const uint64 tenTo17m1 = tenTo17 - 1;
#include "basics/ripple_RPCServerHandler.cpp"
#include "node/ripple_NodeObject.cpp"
#include "node/ripple_NodeStore.cpp"
-#include "node/ripple_LevelDBBackendFactory.cpp"
#include "node/ripple_HyperLevelDBBackendFactory.cpp"
-#include "node/ripple_MdbBackendFactory.cpp"
+#include "node/ripple_KeyvaDBBackendFactory.cpp"
+#include "node/ripple_LevelDBBackendFactory.cpp"
#include "node/ripple_NullBackendFactory.cpp"
+#include "node/ripple_MdbBackendFactory.cpp"
#include "node/ripple_SqliteBackendFactory.cpp"
#include "ledger/Ledger.cpp"
@@ -427,7 +431,6 @@ static DH* handleTmpDh (SSL* ssl, int is_export, int iKeyLength)
#include "ledger/LedgerUnitTests.cpp"
#include "src/cpp/ripple/ripple_SHAMapUnitTests.cpp"
#include "src/cpp/ripple/ripple_SHAMapSyncUnitTests.cpp"
-#include "src/cpp/ripple/ripple_ProofOfWorkFactoryUnitTests.cpp" // Requires ProofOfWorkFactory.h
#include "src/cpp/ripple/ripple_SerializedTransactionUnitTests.cpp"
//------------------------------------------------------------------------------
diff --git a/modules/ripple_basics/containers/ripple_TaggedCache.h b/modules/ripple_basics/containers/ripple_TaggedCache.h
index 1551ebda05..1f3c294887 100644
--- a/modules/ripple_basics/containers/ripple_TaggedCache.h
+++ b/modules/ripple_basics/containers/ripple_TaggedCache.h
@@ -62,9 +62,75 @@ public:
void sweep ();
void clear ();
- bool touch (const key_type& key);
+ /** Refresh the expiration time on a key.
+
+ @param key The key to refresh.
+ @return `true` if the key was found and the object is cached.
+ */
+ bool refreshIfPresent (const key_type& key)
+ {
+ bool found = false;
+
+ // If present, make current in cache
+ boost::recursive_mutex::scoped_lock sl (mLock);
+
+ cache_iterator cit = mCache.find (key);
+
+ if (cit != mCache.end ())
+ {
+ cache_entry& entry = cit->second;
+
+ if (! entry.isCached ())
+ {
+ // Convert weak to strong.
+ entry.ptr = entry.lock ();
+
+ if (entry.isCached ())
+ {
+ // We just put the object back in cache
+ ++mCacheCount;
+ entry.touch ();
+ found = true;
+ }
+ else
+ {
+ // Couldn't get strong pointer,
+ // object fell out of the cache so remove the entry.
+ mCache.erase (cit);
+ }
+ }
+ else
+ {
+ // It's cached so update the timer
+ entry.touch ();
+ found = true;
+ }
+ }
+ else
+ {
+ // not present
+ }
+
+ return found;
+ }
+
bool del (const key_type& key, bool valid);
+
+ /** Replace aliased objects with originals.
+
+ Due to concurrency it is possible for two separate objects with
+ the same content and referring to the same unique "thing" to exist.
+ This routine eliminates the duplicate and performs a replacement
+ on the caller's shared pointer if needed.
+
+ @param key The key corresponding to the object
+ @param data A shared pointer to the data corresponding to the object.
+ @param replace `true` if `data` is the up-to-date version of the object.
+
+ @return `true` if the operation was successful.
+ */
bool canonicalize (const key_type& key, boost::shared_ptr& data, bool replace = false);
+
bool store (const key_type& key, const c_Data& data);
boost::shared_ptr fetch (const key_type& key);
bool retrieve (const key_type& key, c_Data& data);
@@ -264,40 +330,6 @@ void TaggedCache::sweep ()
}
}
-template
-bool TaggedCache::touch (const key_type& key)
-{
- // If present, make current in cache
- boost::recursive_mutex::scoped_lock sl (mLock);
-
- cache_iterator cit = mCache.find (key);
-
- if (cit == mCache.end ()) // Don't have the object
- return false;
-
- cache_entry& entry = cit->second;
-
- if (entry.isCached ())
- {
- entry.touch ();
- return true;
- }
-
- entry.ptr = entry.lock ();
-
- if (entry.isCached ())
- {
- // We just put the object back in cache
- ++mCacheCount;
- entry.touch ();
- return true;
- }
-
- // Object fell out
- mCache.erase (cit);
- return false;
-}
-
template
bool TaggedCache