//------------------------------------------------------------------------------
/*
    This file is part of Beast: https://github.com/vinniefalco/Beast
    Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

/*

    TODO

    - Check consistency / range checking on read

    - Cache top level tree nodes

    - Coalesce I/O in RandomAccessFile

    - Delete / file compaction

*/

class KeyvaDBImp : public KeyvaDB
{
private:
    // These are stored in big endian format in the file.

    // A file offset.
    typedef int64 FileOffset;

    // Index of a key.
    //
    // The value is broken up into two parts: the key block index,
    // and a 1-based index within the key block corresponding to the
    // internal key number.
    //
    typedef int32 KeyIndex;
    typedef int32 KeyBlockIndex;

    // Size of a value.
    typedef uint32 ByteSize;

private:
    // Returns the number of keys in a key block with the specified depth.
    static int calcKeysAtDepth (int depth)
    {
        return (1U << depth) - 1;
    }
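
    // For example, a key block is sized for a complete binary tree of the
    // given depth, so it holds (2^depth - 1) keys: depth 1 holds 1 key,
    // depth 2 holds 3, and depth 3 holds 7.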

    // Returns the number of bytes in a key record.
    static int calcKeyRecordBytes (int keyBytes)
    {
        // This depends on the format of a serialized key record.
        return
            sizeof (FileOffset) +
            sizeof (ByteSize) +
            sizeof (KeyIndex) +
            sizeof (KeyIndex) +
            keyBytes
            ;
    }
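
    // Serialized key record layout (big endian), matching the typedefs above
    // (int64 is 8 bytes, int32 and uint32 are 4 bytes) and the read/write
    // routines below:
    //
    //     valFileOffset   FileOffset   8 bytes
    //     valSize         ByteSize     4 bytes
    //     leftIndex       KeyIndex     4 bytes
    //     rightIndex      KeyIndex     4 bytes
    //     key             raw bytes    keyBytes
    //
    // so each record occupies 20 + keyBytes bytes.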

    // Returns the number of bytes in a key block.
    static int calcKeyBlockBytes (int depth, int keyBytes)
    {
        return calcKeysAtDepth (depth) * calcKeyRecordBytes (keyBytes);
    }

public:
    enum
    {
        currentVersion = 1
    };

    //--------------------------------------------------------------------------

    struct KeyAddress
    {
        // 1-based key block number.
        uint32 blockNumber;

        // 1-based key index within the block, breadth-first, left to right.
        uint32 keyNumber;
    };

    enum
    {
        // The size of the fixed area at the beginning of the key file.
        // This is used to store some housekeeping information like the
        // key size and version number.
        //
        masterHeaderBytes = 1000
    };

    // The master record is at the beginning of the key file.
    struct MasterRecord
    {
        // Version number, starting from 1.
        int32 version;

        KeyBlockIndex nextKeyBlockIndex;

        void write (OutputStream& stream)
        {
            stream.writeTypeBigEndian (version);
        }

        void read (InputStream& stream)
        {
            stream.readTypeBigEndianInto (&version);
        }
    };

    // Key records are indexed starting at one.
    struct KeyRecord : public Uncopyable
    {
        explicit KeyRecord (void* const keyStorage)
            : key (keyStorage)
        {
        }

        // Absolute byte FileOffset in the value file.
        FileOffset valFileOffset;

        // Size of the corresponding value, in bytes.
        ByteSize valSize;

        // Key record index of the left node, or 0.
        KeyIndex leftIndex;

        // Key record index of the right node, or 0.
        KeyIndex rightIndex;

        // Points to keyBytes of storage for the key.
        void* const key;
    };

    //--------------------------------------------------------------------------

    // A complete key block. The contents of the memory for the key block
    // are identical to the format on disk. Therefore it is necessary to
    // use the serialization routines to extract or update the key records.
    //
    class KeyBlock : public Uncopyable
    {
    public:
        KeyBlock (int depth, int keyBytes)
            : m_depth (depth)
            , m_keyBytes (keyBytes)
            , m_storage (calcKeyBlockBytes (depth, keyBytes))
        {
        }

        void read (InputStream& stream)
        {
            stream.read (m_storage.getData (), calcKeyBlockBytes (m_depth, m_keyBytes));
        }

        void write (OutputStream& stream)
        {
            stream.write (m_storage.getData (), calcKeyBlockBytes (m_depth, m_keyBytes));
        }

        void readKeyRecord (KeyRecord* keyRecord, int keyIndex)
        {
            bassert (keyIndex >= 1 && keyIndex <= calcKeysAtDepth (m_depth));

            size_t const byteOffset = (keyIndex - 1) * calcKeyRecordBytes (m_keyBytes);

            MemoryInputStream stream (
                addBytesToPointer (m_storage.getData (), byteOffset),
                calcKeyRecordBytes (m_keyBytes),
                false);

            stream.readTypeBigEndianInto (&keyRecord->valFileOffset);
            stream.readTypeBigEndianInto (&keyRecord->valSize);
            stream.readTypeBigEndianInto (&keyRecord->leftIndex);
            stream.readTypeBigEndianInto (&keyRecord->rightIndex);
            stream.read (keyRecord->key, m_keyBytes);
        }

#if 0
        void writeKeyRecord (KeyRecord const& keyRecord, int keyIndex)
        {
            bassert (keyIndex >= 1 && keyIndex <= calcKeysAtDepth (m_depth));

#if 0
            size_t const byteOffset = (keyIndex - 1) * calcKeyRecordBytes (m_keyBytes);

            MemoryOutputStream stream (
                addBytesToPointer (m_storage.getData (), byteOffset),
                calcKeyRecordBytes (m_keyBytes));

            stream.writeTypeBigEndian (keyRecord.valFileOffset);
            stream.writeTypeBigEndian (keyRecord.valSize);
            stream.writeTypeBigEndian (keyRecord.leftIndex);
            stream.writeTypeBigEndian (keyRecord.rightIndex);
            stream.write (keyRecord.key, m_keyBytes);
#endif
        }
#endif

    private:
        int const m_depth;
        int const m_keyBytes;
        MemoryBlock m_storage;
    };

    //--------------------------------------------------------------------------

    // Concurrent data
    //
    struct State
    {
        RandomAccessFile keyFile;
        RandomAccessFile valFile;
        MasterRecord masterRecord;
        KeyIndex newKeyIndex;
        FileOffset valFileSize;

        bool hasKeys () const noexcept
        {
            return newKeyIndex > 1;
        }
    };
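
    // All access to the State goes through SharedData: the member functions
    // below acquire a SharedState::Access before touching the files, which
    // serializes concurrent use.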

    typedef SharedData <State> SharedState;

    //--------------------------------------------------------------------------

    int const m_keyBytes;
    int const m_keyBlockDepth;
    SharedState m_state;
    HeapBlock <char> m_keyStorage;

    //--------------------------------------------------------------------------

    KeyvaDBImp (int keyBytes,
                int keyBlockDepth,
                File keyPath,
                File valPath)
        : m_keyBytes (keyBytes)
        , m_keyBlockDepth (keyBlockDepth)
        , m_keyStorage (keyBytes)
    {
        SharedState::Access state (m_state);

        openFile (&state->keyFile, keyPath);

        int64 const fileSize = state->keyFile.getFile ().getSize ();

        if (fileSize == 0)
        {
            // VFALCO TODO Better error handling here
            // Initialize the key file.
            Result result = state->keyFile.setPosition (masterHeaderBytes - 1);
            if (result.wasOk ())
            {
                char byte = 0;

                result = state->keyFile.write (&byte, 1);

                if (result.wasOk ())
                {
                    state->keyFile.flush ();
                }
            }
        }

        state->newKeyIndex = 1 + static_cast <KeyIndex> (
            (state->keyFile.getFile ().getSize () - masterHeaderBytes) / calcKeyRecordBytes (m_keyBytes));

        openFile (&state->valFile, valPath);

        state->valFileSize = state->valFile.getFile ().getSize ();
    }

    ~KeyvaDBImp ()
    {
        SharedState::Access state (m_state);

        flushInternal (state);
    }

    // Open a file for reading and writing.
    // Creates the file if it doesn't exist.
    static void openFile (RandomAccessFile* file, File path)
    {
        Result const result = file->open (path, RandomAccessFile::readWrite);

        if (! result)
        {
            String s;
            s << "KeyvaDB: Couldn't open " << path.getFileName () << " for writing.";
            Throw (std::runtime_error (s.toStdString ()));
        }
    }

    //--------------------------------------------------------------------------

    Result createMasterRecord (SharedState::Access& state)
    {
        MemoryBlock buffer (masterHeaderBytes, true);

        Result result = state->keyFile.setPosition (0);

        if (result.wasOk ())
        {
            //MasterRecord mr;

            //mr.version = 1;

            result = state->keyFile.write (buffer.getData (), buffer.getSize ());
        }

        return result;
    }

    //--------------------------------------------------------------------------

    FileOffset calcKeyRecordOffset (KeyIndex keyIndex)
    {
        bassert (keyIndex > 0);

        FileOffset const byteOffset = masterHeaderBytes + (keyIndex - 1) * calcKeyRecordBytes (m_keyBytes);

        return byteOffset;
    }
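
    // For example, with masterHeaderBytes = 1000 and 32-byte keys (so a
    // 52-byte record), key index 1 sits at offset 1000 and key index 3 at
    // offset 1000 + 2 * 52 = 1104.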

    // Read a key record into memory.
    // VFALCO TODO Return a Result and do validity checking on all inputs
    //
    void readKeyRecord (KeyRecord* const keyRecord,
                        KeyIndex const keyIndex,
                        SharedState::Access& state)
    {
        FileOffset const byteOffset = calcKeyRecordOffset (keyIndex);

        Result result = state->keyFile.setPosition (byteOffset);

        if (result.wasOk ())
        {
            MemoryBlock data (calcKeyRecordBytes (m_keyBytes));

            size_t bytesRead;

            result = state->keyFile.read (data.getData (), calcKeyRecordBytes (m_keyBytes), &bytesRead);

            if (result.wasOk ())
            {
                if (bytesRead == static_cast <size_t> (calcKeyRecordBytes (m_keyBytes)))
                {
                    MemoryInputStream stream (data, false);

                    // This defines the file format!
                    stream.readTypeBigEndianInto (&keyRecord->valFileOffset);
                    stream.readTypeBigEndianInto (&keyRecord->valSize);
                    stream.readTypeBigEndianInto (&keyRecord->leftIndex);
                    stream.readTypeBigEndianInto (&keyRecord->rightIndex);

                    // Grab the key.
                    stream.read (keyRecord->key, m_keyBytes);
                }
                else
                {
                    result = Result::fail ("KeyvaDB: bytesRead != calcKeyRecordBytes()");
                }
            }
        }

        if (! result.wasOk ())
        {
            String s;
            s << "KeyvaDB: readKeyRecord failed in " << state->keyFile.getFile ().getFileName ();
            Throw (std::runtime_error (s.toStdString ()));
        }
    }

    // Write a key record from memory.
    void writeKeyRecord (KeyRecord const& keyRecord,
                         KeyIndex const keyIndex,
                         SharedState::Access& state,
                         bool includingKey)
    {
        FileOffset const byteOffset = calcKeyRecordOffset (keyIndex);

        int const bytes = calcKeyRecordBytes (m_keyBytes) - (includingKey ? 0 : m_keyBytes);

        // VFALCO TODO Recycle this buffer
        MemoryBlock data (bytes);

        {
            MemoryOutputStream stream (data, false);

            // This defines the file format!
            stream.writeTypeBigEndian (keyRecord.valFileOffset);
            stream.writeTypeBigEndian (keyRecord.valSize);
            stream.writeTypeBigEndian (keyRecord.leftIndex);
            stream.writeTypeBigEndian (keyRecord.rightIndex);

            // Write the key.
            if (includingKey)
                stream.write (keyRecord.key, m_keyBytes);
        }

        Result result = state->keyFile.setPosition (byteOffset);

        if (result.wasOk ())
        {
            size_t bytesWritten;

            result = state->keyFile.write (data.getData (), bytes, &bytesWritten);

            if (result.wasOk ())
            {
                if (bytesWritten != static_cast <size_t> (bytes))
                {
                    result = Result::fail ("KeyvaDB: bytesWritten != bytes");
                }
            }
        }

        if (! result.wasOk ())
        {
            String s;
            s << "KeyvaDB: writeKeyRecord failed in " << state->keyFile.getFile ().getFileName ();
            Throw (std::runtime_error (s.toStdString ()));
        }
    }

    // Append a value to the value file.
    // VFALCO TODO return a Result
    void writeValue (void const* const value, ByteSize valueBytes, SharedState::Access& state)
    {
        Result result = state->valFile.setPosition (state->valFileSize);

        if (result.wasOk ())
        {
            size_t bytesWritten;

            result = state->valFile.write (value, valueBytes, &bytesWritten);

            if (result.wasOk ())
            {
                if (bytesWritten == valueBytes)
                {
                    state->valFileSize += valueBytes;
                }
                else
                {
                    result = Result::fail ("KeyvaDB: bytesWritten != valueBytes");
                }
            }
        }

        if (! result.wasOk ())
        {
            String s;
            s << "KeyvaDB: writeValue failed in " << state->valFile.getFile ().getFileName ();
            Throw (std::runtime_error (s.toStdString ()));
        }
    }

    //--------------------------------------------------------------------------

    struct FindResult : public Uncopyable
    {
        FindResult (void* const keyStorage)
            : keyRecord (keyStorage)
        {
        }

        int compare;            // result of the last comparison
        KeyIndex keyIndex;      // index we looked at last
        //KeyBlock keyBlock;    // KeyBlock we looked at last
        KeyRecord keyRecord;    // KeyRecord we looked at last
    };

    // Find a key. If the key doesn't exist, enough information
    // is left behind in the result to perform an insertion.
    //
    // Returns true if the key was found.
    //
    bool find (FindResult* findResult, void const* key, SharedState::Access& state)
    {
        // Not okay to call this with an empty key file!
        bassert (state->hasKeys ());

        // This performs a standard binary search tree traversal,
        // starting from the root key record.

        findResult->keyIndex = 1;

        do
        {
            readKeyRecord (&findResult->keyRecord, findResult->keyIndex, state);

            findResult->compare = memcmp (key, findResult->keyRecord.key, m_keyBytes);

            if (findResult->compare < 0)
            {
                if (findResult->keyRecord.leftIndex != 0)
                {
                    // Go left
                    findResult->keyIndex = findResult->keyRecord.leftIndex;
                }
                else
                {
                    // Insert position is to the left
                    break;
                }
            }
            else if (findResult->compare > 0)
            {
                if (findResult->keyRecord.rightIndex != 0)
                {
                    // Go right
                    findResult->keyIndex = findResult->keyRecord.rightIndex;
                }
                else
                {
                    // Insert position is to the right
                    break;
                }
            }
        }
        while (findResult->compare != 0);

        return findResult->compare == 0;
    }
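
    // Note: keys form an unbalanced binary search tree (no rebalancing is
    // done on insert), so each lookup costs one key record read per tree
    // level visited, and pathological insertion orders degrade toward a
    // linear scan.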

    //--------------------------------------------------------------------------

    bool get (void const* key, GetCallback* callback)
    {
        FindResult findResult (m_keyStorage.getData ());

        SharedState::Access state (m_state);

        bool found = false;

        if (state->hasKeys ())
        {
            found = find (&findResult, key, state);

            if (found)
            {
                void* const destStorage = callback->getStorageForValue (findResult.keyRecord.valSize);

                Result result = state->valFile.setPosition (findResult.keyRecord.valFileOffset);

                if (result.wasOk ())
                {
                    size_t bytesRead;

                    result = state->valFile.read (destStorage, findResult.keyRecord.valSize, &bytesRead);

                    if (result.wasOk ())
                    {
                        if (bytesRead != findResult.keyRecord.valSize)
                        {
                            result = Result::fail ("KeyvaDB: bytesRead != valSize");
                        }
                    }
                }

                if (! result.wasOk ())
                {
                    String s;
                    s << "KeyvaDB: get failed in " << state->valFile.getFile ().getFileName ();
                    Throw (std::runtime_error (s.toStdString ()));
                }
            }
        }

        return found;
    }

    //--------------------------------------------------------------------------

    // Write a key/value pair. Does nothing if the key already exists.
    void put (void const* key, void const* value, int valueBytes)
    {
        bassert (valueBytes > 0);

        SharedState::Access state (m_state);

        if (state->hasKeys ())
        {
            // Search for the key

            FindResult findResult (m_keyStorage.getData ());

            bool const found = find (&findResult, key, state);

            if (! found)
            {
                bassert (findResult.compare != 0);

                // Binary tree insertion.
                // Link the last key record to the new key.
                {
                    if (findResult.compare < 0)
                    {
                        findResult.keyRecord.leftIndex = state->newKeyIndex;
                    }
                    else
                    {
                        findResult.keyRecord.rightIndex = state->newKeyIndex;
                    }

                    writeKeyRecord (findResult.keyRecord, findResult.keyIndex, state, false);
                }

                // Write the new key.
                {
                    findResult.keyRecord.valFileOffset = state->valFileSize;
                    findResult.keyRecord.valSize = valueBytes;
                    findResult.keyRecord.leftIndex = 0;
                    findResult.keyRecord.rightIndex = 0;

                    memcpy (findResult.keyRecord.key, key, m_keyBytes);

                    writeKeyRecord (findResult.keyRecord, state->newKeyIndex, state, true);
                }

                // Key file has grown by one.
                ++state->newKeyIndex;

                // Write the value.
                writeValue (value, valueBytes, state);
            }
            else
            {
                // Key already exists, do nothing.
                // We could check to make sure the payloads are the same.
            }
        }
        else
        {
            //
            // Write first key
            //

            KeyRecord keyRecord (m_keyStorage.getData ());

            keyRecord.valFileOffset = state->valFileSize;
            keyRecord.valSize = valueBytes;
            keyRecord.leftIndex = 0;
            keyRecord.rightIndex = 0;

            memcpy (keyRecord.key, key, m_keyBytes);

            writeKeyRecord (keyRecord, state->newKeyIndex, state, true);

            // Key file has grown by one.
            ++state->newKeyIndex;

            //
            // Write value
            //

            bassert (state->valFileSize == 0);

            writeValue (value, valueBytes, state);
        }
    }

    //--------------------------------------------------------------------------

    void flush ()
    {
        SharedState::Access state (m_state);

        flushInternal (state);
    }

    void flushInternal (SharedState::Access& state)
    {
        state->keyFile.flush ();
        state->valFile.flush ();
    }
};

KeyvaDB* KeyvaDB::New (int keyBytes, int keyBlockDepth, File keyPath, File valPath)
{
    return new KeyvaDBImp (keyBytes, keyBlockDepth, keyPath, valPath);
}
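
// Illustrative usage sketch (disabled; not part of the library). It mirrors
// how KeyvaDBTests below drives the interface; the 32-byte key size and the
// BlockGetCallback/exampleUsage names are assumptions for this example only.
#if 0
struct BlockGetCallback : KeyvaDB::GetCallback
{
    MemoryBlock value;

    // KeyvaDB calls this to obtain storage for the retrieved value.
    void* getStorageForValue (int valueBytes)
    {
        value.setSize (valueBytes);
        return value.getData ();
    }
};

void exampleUsage ()
{
    // Create (or re-open) a database with 32-byte keys, using a pair of
    // temporary files for key and value storage.
    File const path (File::createTempFile (""));
    ScopedPointer <KeyvaDB> db (KeyvaDB::New (32, 1,
        path.withFileExtension (".key"),
        path.withFileExtension (".val")));

    uint8 key [32] = { 0 };
    char const payload [] = "hello";

    // Insert the pair; put() is a no-op if the key already exists.
    db->put (key, payload, sizeof (payload));

    // Look the key back up; the callback receives the stored bytes.
    BlockGetCallback cb;
    if (db->get (key, &cb))
    {
        // cb.value now holds the stored payload.
    }
}
#endif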

//------------------------------------------------------------------------------

class KeyvaDBTests : public UnitTest
{
public:
    enum
    {
        maxPayloadBytes = 8 * 1024
    };

    // Retrieval callback stores the value in a Payload object for comparison.
    struct PayloadGetCallback : KeyvaDB::GetCallback
    {
        UnitTestUtilities::Payload payload;

        PayloadGetCallback () : payload (maxPayloadBytes)
        {
        }

        void* getStorageForValue (int valueBytes)
        {
            bassert (valueBytes <= maxPayloadBytes);

            payload.bytes = valueBytes;

            return payload.data.getData ();
        }
    };

    KeyvaDB* createDB (unsigned int keyBytes, File const& path)
    {
        File const keyPath = path.withFileExtension (".key");
        File const valPath = path.withFileExtension (".val");

        return KeyvaDB::New (keyBytes, 1, keyPath, valPath);
    }

    void deleteDBFiles (File const& path)
    {
        File const keyPath = path.withFileExtension (".key");
        File const valPath = path.withFileExtension (".val");

        keyPath.deleteFile ();
        valPath.deleteFile ();
    }

    template <size_t KeyBytes>
    void testKeySize (unsigned int const maxItems)
    {
        using namespace UnitTestUtilities;

        typedef UnsignedInteger <KeyBytes> KeyType;

        int64 const seedValue = 50;

        String s;

        s << "keyBytes=" << String (uint64 (KeyBytes)) << ", maxItems=" << String (maxItems);
        beginTestCase (s);

        // Set up the key and value files.
        File const path (File::createTempFile (""));

        {
            // Open the db.
            ScopedPointer <KeyvaDB> db (createDB (KeyBytes, path));

            Payload payload (maxPayloadBytes);
            Payload check (maxPayloadBytes);

            {
                // Create an array of ascending integers.
                HeapBlock <unsigned int> items (maxItems);
                for (unsigned int i = 0; i < maxItems; ++i)
                    items [i] = i;

                // Now shuffle it deterministically.
                repeatableShuffle (maxItems, items, seedValue);

                // Write all the keys of integers.
                for (unsigned int i = 0; i < maxItems; ++i)
                {
                    unsigned int keyIndex = items [i];

                    KeyType const key = KeyType::createFromInteger (keyIndex);

                    payload.repeatableRandomFill (1, maxPayloadBytes, keyIndex + seedValue);

                    db->put (key.cbegin (), payload.data.getData (), payload.bytes);

                    {
                        // VFALCO TODO Check what we just wrote?
                        //db->get (key.cbegin (), check.data.getData (), payload.bytes);
                    }
                }
            }

            {
                // Go through all of our keys and try to retrieve them.
                // Since this is done in ascending order, we should get
                // random seeks at this point.
                //
                PayloadGetCallback cb;
                for (unsigned int keyIndex = 0; keyIndex < maxItems; ++keyIndex)
                {
                    KeyType const v = KeyType::createFromInteger (keyIndex);

                    bool const found = db->get (v.cbegin (), &cb);

                    expect (found, "Should be found");

                    if (found)
                    {
                        payload.repeatableRandomFill (1, maxPayloadBytes, keyIndex + seedValue);

                        expect (payload == cb.payload, "Should be equal");
                    }
                }
            }
        }

        {
            // Re-open the database and confirm the data.
            ScopedPointer <KeyvaDB> db (createDB (KeyBytes, path));

            Payload payload (maxPayloadBytes);
            Payload check (maxPayloadBytes);

            PayloadGetCallback cb;
            for (unsigned int keyIndex = 0; keyIndex < maxItems; ++keyIndex)
            {
                KeyType const v = KeyType::createFromInteger (keyIndex);

                bool const found = db->get (v.cbegin (), &cb);

                expect (found, "Should be found");

                if (found)
                {
                    payload.repeatableRandomFill (1, maxPayloadBytes, keyIndex + seedValue);

                    expect (payload == cb.payload, "Should be equal");
                }
            }
        }

        deleteDBFiles (path);
    }

    void runTest ()
    {
        testKeySize <4> (500);
        testKeySize <32> (4000);
    }

    KeyvaDBTests () : UnitTest ("KeyvaDB", "beast", runManual)
    {
    }
};

static KeyvaDBTests keyvaDBTests;