diff --git a/Builds/VisualStudio2013/RippleD.vcxproj b/Builds/VisualStudio2013/RippleD.vcxproj
index c098808a3..93af249fc 100755
--- a/Builds/VisualStudio2013/RippleD.vcxproj
+++ b/Builds/VisualStudio2013/RippleD.vcxproj
@@ -2112,6 +2112,8 @@
+
+
diff --git a/Builds/VisualStudio2013/RippleD.vcxproj.filters b/Builds/VisualStudio2013/RippleD.vcxproj.filters
index 51784ab98..db5af27b8 100644
--- a/Builds/VisualStudio2013/RippleD.vcxproj.filters
+++ b/Builds/VisualStudio2013/RippleD.vcxproj.filters
@@ -3072,6 +3072,9 @@
ripple\nodestore\impl
+
+ ripple\nodestore\impl
+
ripple\nodestore\impl
diff --git a/src/ripple/nodestore/backend/NuDBFactory.cpp b/src/ripple/nodestore/backend/NuDBFactory.cpp
index 6c37593d8..3d7c07117 100644
--- a/src/ripple/nodestore/backend/NuDBFactory.cpp
+++ b/src/ripple/nodestore/backend/NuDBFactory.cpp
@@ -21,14 +21,14 @@
#include
#include
+#include
#include
#include
#include
-#include // remove asap
+#include
#include
#include
#include
-#include
#include
#include
#include
@@ -50,22 +50,11 @@ public:
// distribution of data sizes.
arena_alloc_size = 16 * 1024 * 1024,
- // Version 1
- // No compression
- //
- typeOne = 1,
-
- // Version 2
- // Snappy compression
- typeTwo = 2,
-
-
-
- currentType = typeTwo
+ currentType = 1
};
using api = beast::nudb::api<
- beast::xxhasher, beast::nudb::identity_codec>;
+ beast::xxhasher, nodeobject_codec>;
beast::Journal journal_;
size_t const keyBytes_;
@@ -137,74 +126,8 @@ public:
}
}
- //--------------------------------------------------------------------------
-
- class Buffer
- {
- private:
- std::size_t size_ = 0;
- std::size_t capacity_ = 0;
- std::unique_ptr buf_;
-
- public:
- Buffer() = default;
- Buffer (Buffer const&) = delete;
- Buffer& operator= (Buffer const&) = delete;
-
- explicit
- Buffer (std::size_t n)
- {
- resize (n);
- }
-
- std::size_t
- size() const
- {
- return size_;
- }
-
- std::size_t
- capacity() const
- {
- return capacity_;
- }
-
- void*
- get()
- {
- return buf_.get();
- }
-
- void
- resize (std::size_t n)
- {
- if (capacity_ < n)
- {
- capacity_ = beast::nudb::detail::ceil_pow2(n);
- buf_.reset (new std::uint8_t[capacity_]);
- }
- size_ = n;
- }
-
- // Meet the requirements of BufferFactory
- void*
- operator() (std::size_t n)
- {
- resize(n);
- return get();
- }
- };
-
- //--------------------------------------------------------------------------
- //
- // Version 1 Database
- //
- // Uncompressed
- //
-
Status
- fetch1 (void const* key,
- std::shared_ptr * pno)
+ fetch (void const* key, NodeObject::Ptr* pno)
{
Status status;
pno->reset();
@@ -226,94 +149,13 @@ public:
return status;
}
- void
- insert1 (void const* key, void const* data,
- std::size_t size)
- {
- db_.insert (key, data, size);
- }
-
- //--------------------------------------------------------------------------
- //
- // Version 2 Database
- //
- // Snappy compression
- //
-
- Status
- fetch2 (void const* key,
- std::shared_ptr * pno)
- {
- Status status;
- pno->reset();
- if (! db_.fetch (key,
- [&](void const* data, std::size_t size)
- {
- std::size_t actual;
- if (! snappy::GetUncompressedLength(
- (char const*)data, size, &actual))
- {
- status = dataCorrupt;
- return;
- }
- std::unique_ptr buf (new char[actual]);
- snappy::RawUncompress (
- (char const*)data, size, buf.get());
- DecodedBlob decoded (key, buf.get(), actual);
- if (! decoded.wasOk ())
- {
- status = dataCorrupt;
- return;
- }
- *pno = decoded.createObject();
- status = ok;
- }))
- {
- return notFound;
- }
-
- return status;
- }
-
- void
- insert2 (void const* key, void const* data,
- std::size_t size)
- {
- std::unique_ptr buf (
- new char[snappy::MaxCompressedLength(size)]);
- std::size_t actual;
- snappy::RawCompress ((char const*)data, size,
- buf.get(), &actual);
- db_.insert (key, buf.get(), actual);
- }
-
- //--------------------------------------------------------------------------
-
- Status
- fetch (void const* key, NodeObject::Ptr* pno)
- {
- switch (db_.appnum())
- {
- case typeOne: return fetch1 (key, pno);
- case typeTwo: return fetch2 (key, pno);
- }
- throw std::runtime_error(
- "nodestore: unknown appnum");
- return notFound;
- }
-
void
do_insert (std::shared_ptr const& no)
{
EncodedBlob e;
e.prepare (no);
- switch (db_.appnum())
- {
- case typeOne: return insert1 (e.getKey(), e.getData(), e.getSize());
- case typeTwo: return insert2 (e.getKey(), e.getData(), e.getSize());
- }
- throw std::runtime_error(
- "nodestore: unknown appnum");
+ db_.insert (e.getKey(),
+ e.getData(), e.getSize());
}
void
@@ -352,40 +194,17 @@ public:
auto const dp = db_.dat_path();
auto const kp = db_.key_path();
auto const lp = db_.log_path();
- auto const appnum = db_.appnum();
+ //auto const appnum = db_.appnum();
db_.close();
api::visit (dp,
[&](
void const* key, std::size_t key_bytes,
void const* data, std::size_t size)
{
- switch (appnum)
- {
- case typeOne:
- {
- DecodedBlob decoded (key, data, size);
- if (! decoded.wasOk ())
- return false;
- f (decoded.createObject());
- break;
- }
- case typeTwo:
- {
- std::size_t actual;
- if (! snappy::GetUncompressedLength(
- (char const*)data, size, &actual))
- return false;
- std::unique_ptr buf (new char[actual]);
- if (! snappy::RawUncompress ((char const*)data,
- size, buf.get()))
- return false;
- DecodedBlob decoded (key, buf.get(), actual);
- if (! decoded.wasOk ())
- return false;
- f (decoded.createObject());
- break;
- }
- }
+ DecodedBlob decoded (key, data, size);
+ if (! decoded.wasOk ())
+ return false;
+ f (decoded.createObject());
return true;
});
db_.open (dp, kp, lp,
diff --git a/src/ripple/nodestore/impl/DecodedBlob.cpp b/src/ripple/nodestore/impl/DecodedBlob.cpp
index e16268ef0..8fbb914c8 100644
--- a/src/ripple/nodestore/impl/DecodedBlob.cpp
+++ b/src/ripple/nodestore/impl/DecodedBlob.cpp
@@ -58,10 +58,10 @@ DecodedBlob::DecodedBlob (void const* key, void const* value, int valueBytes)
switch (m_objectType)
{
- case hotUNKNOWN:
default:
break;
+ case hotUNKNOWN:
case hotLEDGER:
case hotTRANSACTION:
case hotACCOUNT_NODE:
diff --git a/src/ripple/nodestore/impl/codec.h b/src/ripple/nodestore/impl/codec.h
new file mode 100644
index 000000000..102942a50
--- /dev/null
+++ b/src/ripple/nodestore/impl/codec.h
@@ -0,0 +1,512 @@
+//------------------------------------------------------------------------------
+/*
+ This file is part of rippled: https://github.com/ripple/rippled
+ Copyright (c) 2012, 2013 Ripple Labs Inc.
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+//==============================================================================
+
+#ifndef RIPPLE_NODESTORE_CODEC_H_INCLUDED
+#define RIPPLE_NODESTORE_CODEC_H_INCLUDED
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace ripple {
+namespace NodeStore {
+
+namespace detail {
+
+template
+std::pair
+snappy_compress (void const* in,
+    std::size_t in_size, BufferFactory&& bf) // bf(n) must yield a writable buffer of at least n bytes
+{
+    std::pair result; // (pointer, size) of the compressed output
+    auto const out_max =
+        snappy::MaxCompressedLength(in_size); // worst-case output bound
+    void* const out = bf(out_max);
+    result.first = out;
+    snappy::RawCompress(
+        reinterpret_cast(in),
+        in_size, reinterpret_cast(out),
+        &result.second); // actual compressed size written here
+    return result;
+}
+
+template
+std::pair
+snappy_decompress (void const* in,
+    std::size_t in_size, BufferFactory&& bf) // throws beast::nudb::codec_error on malformed input
+{
+    std::pair result;
+    if (! snappy::GetUncompressedLength(
+        reinterpret_cast(in),
+        in_size, &result.second)) // reads the stored uncompressed size into result.second
+        throw beast::nudb::codec_error(
+            "snappy decompress");
+    void* const out = bf(result.second);
+    result.first = out;
+    if (! snappy::RawUncompress(
+        reinterpret_cast(in), in_size,
+        reinterpret_cast(out)))
+        throw beast::nudb::codec_error(
+            "snappy decompress");
+    return result;
+}
+
+template
+std::pair
+lz4_decompress (void const* in,
+    std::size_t in_size, BufferFactory&& bf) // input layout: varint(uncompressed size) + lz4 block
+{
+    using beast::nudb::codec_error;
+    using namespace beast::nudb::detail;
+    std::pair result;
+    std::uint8_t const* p = reinterpret_cast<
+        std::uint8_t const*>(in);
+    auto const n = read_varint(
+        p, in_size, result.second); // size prefix -> result.second
+    if (n == 0)
+        throw codec_error(
+            "lz4 decompress"); // truncated or invalid varint
+    void* const out = bf(result.second);
+    result.first = out;
+    if (LZ4_decompress_fast(
+        reinterpret_cast(in) + n,
+        reinterpret_cast(out),
+        result.second) + n != in_size) // must consume the input exactly
+        throw codec_error(
+            "lz4 decompress");
+    return result;
+}
+
+template
+std::pair
+lz4_compress (void const* in,
+    std::size_t in_size, BufferFactory&& bf) // output layout: varint(in_size) + lz4 block
+{
+    using beast::nudb::codec_error;
+    using namespace beast::nudb::detail;
+    std::pair result;
+    std::array::max> vi;
+    auto const n = write_varint(
+        vi.data(), in_size); // size prefix lets the decoder pre-allocate
+    auto const out_max =
+        LZ4_compressBound(in_size); // worst-case compressed size
+    std::uint8_t* out = reinterpret_cast<
+        std::uint8_t*>(bf(n + out_max));
+    result.first = out;
+    std::memcpy(out, vi.data(), n);
+    auto const out_size = LZ4_compress(
+        reinterpret_cast(in),
+        reinterpret_cast(out + n),
+        in_size);
+    if (out_size == 0)
+        throw codec_error(
+            "lz4 compress");
+    result.second = n + out_size;
+    return result;
+}
+
+//------------------------------------------------------------------------------
+
+/*
+ object types:
+
+ 0 = Uncompressed
+ 1 = lz4 compressed
+ 2 = inner node compressed
+ 3 = full inner node
+*/
+
+template
+std::pair
+nodeobject_decompress (void const* in,
+    std::size_t in_size, BufferFactory&& bf) // returns (pointer, size) of the decoded node object
+{
+    using beast::nudb::codec_error;
+    using namespace beast::nudb::detail;
+
+    std::uint8_t const* p = reinterpret_cast<
+        std::uint8_t const*>(in);
+    std::size_t type; // format tag: 0=uncompressed, 1=lz4, 2=compressed inner node, 3=full inner node
+    auto const vn = read_varint(
+        p, in_size, type);
+    if (vn == 0)
+        throw codec_error(
+            "nodeobject decompress"); // truncated or invalid varint header
+    p += vn;
+    in_size -= vn;
+
+    std::pair result;
+    switch(type)
+    {
+    case 0: // uncompressed
+    {
+        result.first = p; // no copy: points into the caller's input buffer
+        result.second = in_size;
+        break;
+    }
+    case 1: // lz4
+    {
+        result = lz4_decompress(
+            p, in_size, bf);
+        break;
+    }
+    case 2: // inner node
+    {
+        auto const hs =
+            field::size; // Mask
+        if (in_size < hs + 32)
+            throw codec_error(
+                "nodeobject codec: short inner node"); // need mask plus at least one hash
+        istream is(p, in_size);
+        std::uint16_t mask;
+        read(is, mask); // Mask
+        in_size -= hs;
+        result.second = 525; // decoded size: index(4)+unused(4)+kind(1)+prefix(4)+16*32 hashes
+        void* const out = bf(result.second);
+        result.first = out;
+        ostream os(out, result.second);
+        write(os, 0);
+        write(os, 0);
+        write (os, hotUNKNOWN);
+        write(os, HashPrefix::innerNode);
+        if (mask == 0)
+            throw codec_error(
+                "nodeobject codec: empty inner node"); // type 2 must carry at least one branch
+        std::uint16_t bit = 0x8000;
+        for (int i = 16; i--; bit >>= 1)
+        {
+            if (mask & bit)
+            {
+                if (in_size < 32)
+                    throw codec_error(
+                        "nodeobject codec: short inner node");
+                std::memcpy(os.data(32), is(32), 32); // copy stored hash into its branch slot
+                in_size -= 32;
+            }
+            else
+            {
+                std::memset(os.data(32), 0, 32); // absent branch decodes as all-zero hash
+            }
+        }
+        if (in_size > 0)
+            throw codec_error(
+                "nodeobject codec: long inner node"); // trailing bytes not accounted for by mask
+        break;
+    }
+    case 3: // full inner node
+    {
+        if (in_size != 16 * 32) // hashes
+            throw codec_error(
+                "nodeobject codec: short full inner node");
+        istream is(p, in_size);
+        result.second = 525; // same decoded layout as case 2
+        void* const out = bf(result.second);
+        result.first = out;
+        ostream os(out, result.second);
+        write(os, 0);
+        write(os, 0);
+        write (os, hotUNKNOWN);
+        write(os, HashPrefix::innerNode);
+        write(os, is(512), 512); // all 16 hashes copied verbatim
+        break;
+    }
+    default:
+        throw codec_error(
+            "nodeobject codec: bad type=" +
+                std::to_string(type));
+    };
+    return result;
+}
+
+template
+void const*
+zero32() // pointer to 32 zero bytes: the sentinel for an empty inner-node branch
+{
+    static std::array v =
+        []
+        {
+            std::array v;
+            v.fill(0);
+            return v;
+        }();
+    return v.data();
+}
+
+template
+std::pair
+nodeobject_compress (void const* in,
+    std::size_t in_size, BufferFactory&& bf) // returns (pointer, size) of the encoded object
+{
+    using beast::nudb::codec_error;
+    using namespace beast::nudb::detail;
+
+    std::size_t type = 1; // default encoding is lz4
+    // Check for inner node
+    if (in_size == 525)
+    {
+        istream is(in, in_size);
+        std::uint32_t index;
+        std::uint32_t unused;
+        std::uint8_t kind;
+        std::uint32_t prefix;
+        read(is, index);
+        read(is, unused);
+        read (is, kind);
+        read(is, prefix);
+        if (prefix == HashPrefix::innerNode)
+        {
+            std::size_t n = 0; // number of non-empty branches
+            std::uint16_t mask = 0; // bit i set => branch i present
+            std::array<
+                std::uint8_t, 512> vh;
+            for (unsigned bit = 0x8000;
+                bit; bit >>= 1)
+            {
+                void const* const h = is(32);
+                if (std::memcmp(
+                        h, zero32(), 32) == 0) // all-zero hash marks an empty branch
+                    continue;
+                std::memcpy(
+                    vh.data() + 32 * n, h, 32); // pack present hashes contiguously
+                mask |= bit;
+                ++n;
+            }
+            std::pair result;
+            if (n < 16)
+            {
+                // 2 = inner node compressed
+                auto const type = 2U;
+                auto const vs = size_varint(type);
+                result.second =
+                    vs +
+                    field::size + // mask
+                    n * 32;                     // hashes
+                std::uint8_t* out = reinterpret_cast<
+                    std::uint8_t*>(bf(result.second));
+                result.first = out;
+                ostream os(out, result.second);
+                write(os, type);
+                write(os, mask);
+                write(os, vh.data(), n * 32);
+                return result;
+            }
+            // 3 = full inner node
+            auto const type = 3U;
+            auto const vs = size_varint(type);
+            result.second =
+                vs +
+                n * 32; // hashes
+            std::uint8_t* out = reinterpret_cast<
+                std::uint8_t*>(bf(result.second));
+            result.first = out;
+            ostream os(out, result.second);
+            write(os, type);
+            write(os, vh.data(), n * 32);
+            return result;
+        }
+    }
+
+    std::array::max> vi;
+    auto const vn = write_varint(
+        vi.data(), type); // encode format tag ahead of the payload
+    std::pair result;
+    switch(type)
+    {
+    case 0: // uncompressed
+    {
+        result.second = vn + in_size;
+        std::uint8_t* p = reinterpret_cast<
+            std::uint8_t*>(bf(result.second));
+        result.first = p;
+        std::memcpy(p, vi.data(), vn);
+        std::memcpy(p + vn, in, in_size);
+        break;
+    }
+    case 1: // lz4
+    {
+        std::uint8_t* p;
+        auto const lzr = lz4_compress(
+            in, in_size, [&p, &vn, &bf]
+            (std::size_t n)
+            {
+                p = reinterpret_cast<
+                    std::uint8_t*>(
+                        bf(vn + n)); // allocate room for tag + compressed data
+                return p + vn;
+            });
+        std::memcpy(p, vi.data(), vn);
+        result.first = p;
+        result.second = vn + lzr.second;
+        break;
+    }
+    default:
+        throw std::logic_error(
+            "nodeobject codec: unknown=" +
+                std::to_string(type));
+    };
+    return result;
+}
+
+} // detail
+
+// Modifies an inner node to erase the ledger
+// sequence and type information so the codec
+// verification can pass.
+//
+template
+void
+filter_inner (void* in, std::size_t in_size) // zeroes the mutable header fields of a 525-byte inner node, in place
+{
+    using beast::nudb::codec_error;
+    using namespace beast::nudb::detail;
+
+    // Check for inner node
+    if (in_size == 525)
+    {
+        istream is(in, in_size);
+        std::uint32_t index;
+        std::uint32_t unused;
+        std::uint8_t kind;
+        std::uint32_t prefix;
+        read(is, index);
+        read(is, unused);
+        read (is, kind);
+        read(is, prefix);
+        if (prefix == HashPrefix::innerNode)
+        {
+            ostream os(in, 9); // 9 = index(4) + unused(4) + kind(1)
+            write(os, 0);
+            write(os, 0);
+            write (os, hotUNKNOWN);
+        }
+    }
+}
+
+//------------------------------------------------------------------------------
+
+class snappy_codec // adapts the detail::snappy_* helpers to the nudb codec interface
+{
+public:
+    template
+    explicit
+    snappy_codec(Args&&... args) // construction arguments are unused
+    {
+    }
+
+    char const*
+    name() const // codec identifier stored with the database
+    {
+        return "snappy";
+    }
+
+    template
+    std::pair
+    compress (void const* in,
+        std::size_t in_size, BufferFactory&& bf) const
+    {
+        return detail::snappy_compress(in, in_size, bf); // qualified: helper lives in NodeStore::detail, closed above
+    }
+
+    template
+    std::pair
+    decompress (void const* in,
+        std::size_t in_size, BufferFactory&& bf) const
+    {
+        return detail::snappy_decompress(in, in_size, bf); // qualified: helper lives in NodeStore::detail, closed above
+    }
+};
+
+class lz4_codec // adapts the detail::lz4_* helpers to the nudb codec interface
+{
+public:
+    template
+    explicit
+    lz4_codec(Args&&... args) // construction arguments are unused
+    {
+    }
+
+    char const*
+    name() const // codec identifier stored with the database
+    {
+        return "lz4";
+    }
+
+    template
+    std::pair
+    decompress (void const* in,
+        std::size_t in_size, BufferFactory&& bf) const
+    {
+        return detail::lz4_decompress(in, in_size, bf); // fixed: was lz4_compress — decompress must decompress
+    }
+
+    template
+    std::pair
+    compress (void const* in,
+        std::size_t in_size, BufferFactory&& bf) const
+    {
+        return detail::lz4_compress(in, in_size, bf); // qualified: helper lives in NodeStore::detail, closed above
+    }
+};
+
+class nodeobject_codec
+{
+public:
+    template
+    explicit
+    nodeobject_codec(Args&&... args) // construction arguments are unused
+    {
+    }
+
+    char const*
+    name() const // codec identifier stored with the database
+    {
+        return "nodeobject";
+    }
+
+    template
+    std::pair
+    decompress (void const* in,
+        std::size_t in_size, BufferFactory&& bf) const
+    {
+        return detail::nodeobject_decompress(
+            in, in_size, bf);
+    }
+
+    template
+    std::pair
+    compress (void const* in,
+        std::size_t in_size, BufferFactory&& bf) const
+    {
+        return detail::nodeobject_compress(
+            in, in_size, bf);
+    }
+};
+
+} // NodeStore
+} // ripple
+
+#endif