diff --git a/Builds/VisualStudio2013/RippleD.vcxproj b/Builds/VisualStudio2013/RippleD.vcxproj
index 35d96db614..c098808a3d 100755
--- a/Builds/VisualStudio2013/RippleD.vcxproj
+++ b/Builds/VisualStudio2013/RippleD.vcxproj
@@ -804,20 +804,22 @@
+
+
+
+
-
+
-
-
@@ -826,21 +828,19 @@
-
-
-
-
-
+
-
+
True
+
+
@@ -860,6 +860,9 @@
True
+
+ True
+
True
@@ -867,6 +870,8 @@
+
+
diff --git a/Builds/VisualStudio2013/RippleD.vcxproj.filters b/Builds/VisualStudio2013/RippleD.vcxproj.filters
index 2406b0bb73..51784ab98d 100644
--- a/Builds/VisualStudio2013/RippleD.vcxproj.filters
+++ b/Builds/VisualStudio2013/RippleD.vcxproj.filters
@@ -1437,6 +1437,12 @@
beast
+
+ beast\nudb
+
+
+ beast\nudb
+
beast\nudb
@@ -1446,7 +1452,7 @@
beast\nudb\detail
-
+
beast\nudb\detail
@@ -1455,9 +1461,6 @@
beast\nudb\detail
-
- beast\nudb\detail
-
beast\nudb\detail
@@ -1470,27 +1473,24 @@
beast\nudb\detail
-
- beast\nudb\detail
-
beast\nudb\detail
-
+
beast\nudb\detail
-
- beast\nudb
-
beast\nudb
-
+
beast\nudb
beast\nudb
+
+ beast\nudb
+
beast\nudb
@@ -1515,6 +1515,9 @@
beast\nudb\tests
+
+ beast\nudb\tests
+
beast\nudb\tests
@@ -1524,6 +1527,9 @@
beast\nudb
+
+ beast\nudb
+
beast\random
diff --git a/src/beast/beast/nudb.h b/src/beast/beast/nudb.h
index 40b1d49e79..e413c336f5 100644
--- a/src/beast/beast/nudb.h
+++ b/src/beast/beast/nudb.h
@@ -20,10 +20,10 @@
#ifndef BEAST_NUDB_H_INCLUDED
#define BEAST_NUDB_H_INCLUDED
+#include
#include
-#include
+#include
#include
-#include
#include
#include
#include
diff --git a/src/beast/beast/nudb/README.md b/src/beast/beast/nudb/README.md
index 6cb2747bd7..777a98b69f 100644
--- a/src/beast/beast/nudb/README.md
+++ b/src/beast/beast/nudb/README.md
@@ -167,16 +167,23 @@ fixed-length Bucket Records.
char[8] Type The characters "nudb.key"
uint16 Version Holds the version number
+ uint64 UID Unique ID generated on creation
uint64 Appnum Application defined constant
+ uint16 KeySize Key size in bytes
+
uint64 Salt A random seed
uint64 Pepper The salt hashed
- uint16 KeySize Key size in bytes
uint16 BlockSize Size of a file block in bytes
+
uint16 LoadFactor Target fraction in 65536ths
- uint8[64] Reserved Zeroes
+
+ uint8[56] Reserved Zeroes
uint8[] Reserved Zero-pad to block size
-The Type identifies the file as belonging to nudb. Salt is
+The Type identifies the file as belonging to nudb. The UID is
+generated randomly when the database is created, and this value
+is stored in the data and log files as well. The UID is used
+to determine if files belong to the same database. Salt is
generated when the database is created and helps prevent
complexity attacks; the salt is prepended to the key material
when computing a hash, or used to initialize the state of
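As a minimal sketch of the salted hashing described here (the hasher is assumed to be seeded with the salt and then fed the key bytes, matching the `hash<Hasher>()` helper that appears later in this patch; `salted_hash` itself is only an illustrative name):

```cpp
#include <cstddef>
#include <cstdint>

// Illustrative only: the salt becomes part of the hash state before any
// key material is mixed in, so equal keys hash differently across
// databases created with different salts.
template <class Hasher>
std::size_t
salted_hash (void const* key, std::size_t key_size, std::uint64_t salt)
{
    Hasher h (salt);            // salt initializes the hash state
    h.append (key, key_size);   // key material is appended afterwards
    return static_cast<typename Hasher::result_type>(h);
}
```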
@@ -197,7 +204,8 @@ bucket, and defines the size of a bucket record. The load factor
is the target fraction of bucket occupancy.
None of the information in the key file header or the data file
-header may be changed after the database is created.
+header may be changed after the database is created, including
+the Appnum.
#### Bucket Record (fixed-length)
@@ -209,7 +217,7 @@ header may be changed after the database is created.
uint48 Offset Offset in data file of the data
uint48 Size The size of the value in bytes
- uint8[KeySize] Key The key
+ uint48 Hash The hash of the key
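Since a bucket entry now stores only a 48-bit hash rather than the key itself, a lookup has to confirm candidates against the key stored in the data record. A hedged sketch of that confirmation step, using the bucket interface shown later in this patch (`lower_bound`, `size`, `at`) and an illustrative `fetch_key` callback standing in for a data-file read:

```cpp
#include <cstddef>
#include <cstring>

// Illustrative only: entries are kept sorted by hash, so matching
// candidates form a contiguous run starting at lower_bound(h).
template <class Bucket, class FetchKey>
bool
bucket_contains (Bucket const& b, std::size_t h,
    void const* key, std::size_t key_size, FetchKey&& fetch_key)
{
    for (std::size_t i = b.lower_bound (h); i < b.size(); ++i)
    {
        auto const e = b.at (i);       // { offset, size, hash }
        if (e.hash != h)
            break;                     // past the run of equal hashes
        if (std::memcmp (fetch_key (e.offset), key, key_size) == 0)
            return true;               // key confirmed in the data file
    }
    return false;
}
```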
### Data File
@@ -220,14 +228,15 @@ variable-length Value Records and Spill Records.
char[8] Type The characters "nudb.dat"
uint16 Version Holds the version number
+ uint64 UID Unique ID generated on creation
uint64 Appnum Application defined constant
- uint64 Salt A random seed
uint16 KeySize Key size in bytes
+
uint8[64] Reserved Zeroes
-Salt contains the same value as the salt in the corresponding
-key file. This is placed in the data file so that key and value
-files belonging to the same database can be identified.
+UID contains the same value as the UID in the corresponding key
+file. This is placed in the data file so that key and value files
+belonging to the same database can be identified.
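A minimal sketch of the identification check this enables, assuming the headers have already been read; the struct and function names are illustrative, not the library's API:

```cpp
#include <cstdint>
#include <stdexcept>

struct header_ids
{
    std::uint64_t uid;      // generated once, at creation
    std::uint64_t appnum;   // application defined constant
};

// Files opened together must carry the same UID; otherwise they were
// produced by different create() calls and cannot form one database.
inline void
check_same_database (header_ids const& key_hdr, header_ids const& dat_hdr)
{
    if (key_hdr.uid != dat_hdr.uid)
        throw std::runtime_error (
            "nudb: key and data files belong to different databases");
}
```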
#### Data Record (variable-length)
@@ -244,15 +253,24 @@ files belonging to the same database can be identified.
### Log File
The Log file contains the Header followed by zero or more fixed size
+log records. Each log record contains a snapshot of a bucket. When a
+database is not closed cleanly, the recovery process applies the log
+records to the key file, overwriting buckets that may have been only
+partially updated with known-good copies. After the log records are applied,
+the data and key files are truncated to the last known good size.
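A hedged sketch of the recovery idea described above: each log record carries a known-good snapshot of one key-file bucket, so recovery writes each snapshot back at its recorded offset and then truncates the files to the sizes stored in the log header. The types and names here are illustrative, not the library's implementation:

```cpp
#include <cstdint>
#include <fstream>
#include <vector>

struct log_record
{
    std::uint64_t key_offset;         // where the bucket lives in the key file
    std::vector<char> bucket_image;   // snapshot taken before modification
};

inline void
apply_log (std::fstream& key_file,
    std::vector<log_record> const& records)
{
    for (auto const& r : records)
    {
        key_file.seekp (static_cast<std::streamoff>(r.key_offset));
        key_file.write (r.bucket_image.data(),
            static_cast<std::streamsize>(r.bucket_image.size()));
    }
    key_file.flush();
    // ...after which the data and key files are truncated to the
    // KeyFileSize / DataFileSize values recorded in the log header.
}
```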
-#### Header (44 bytes)
+#### Header (62 bytes)
char[8] Type The characters "nudb.log"
uint16 Version Holds the version number
+ uint64 UID Unique ID generated on creation
uint64 Appnum Application defined constant
+ uint16 KeySize Key size in bytes
+
uint64 Salt A random seed.
uint64 Pepper The salt hashed
- uint16 KeySize Key size in bytes
+ uint16 BlockSize Size of a file block in bytes
+
uint64 KeyFileSize Size of key file.
uint64 DataFileSize Size of data file.
diff --git a/src/beast/beast/nudb/api.h b/src/beast/beast/nudb/api.h
new file mode 100644
index 0000000000..65ad74919e
--- /dev/null
+++ b/src/beast/beast/nudb/api.h
@@ -0,0 +1,109 @@
+//------------------------------------------------------------------------------
+/*
+ This file is part of Beast: https://github.com/vinniefalco/Beast
+ Copyright 2014, Vinnie Falco
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+//==============================================================================
+
+#ifndef BEAST_NUDB_API_H_INCLUDED
+#define BEAST_NUDB_API_H_INCLUDED
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace beast {
+namespace nudb {
+
+// Convenience for consolidating template arguments
+//
+template <
+ class Hasher,
+ class Codec,
+ class File = native_file,
+ std::size_t BufferSize = 16 * 1024 * 1024
+>
+struct api
+{
+ using hash_type = Hasher;
+ using codec_type = Codec;
+ using file_type = File;
+ using store = nudb::store<Hasher, Codec, File>;
+
+ static std::size_t const buffer_size = BufferSize;
+
+ template <class... Args>
+ static
+ bool
+ create (
+ path_type const& dat_path,
+ path_type const& key_path,
+ path_type const& log_path,
+ std::uint64_t appnum,
+ std::uint64_t salt,
+ std::size_t key_size,
+ std::size_t block_size,
+ float load_factor,
+ Args&&... args)
+ {
+ return nudb::create<Hasher, Codec, File>(
+ dat_path, key_path, log_path,
+ appnum, salt, key_size, block_size,
+ load_factor, args...);
+ }
+
+ template <class... Args>
+ static
+ bool
+ recover (
+ path_type const& dat_path,
+ path_type const& key_path,
+ path_type const& log_path,
+ Args&&... args)
+ {
+ return nudb::recover<Hasher, Codec, File>(
+ dat_path, key_path, log_path, BufferSize,
+ args...);
+ }
+
+ static
+ verify_info
+ verify (
+ path_type const& dat_path,
+ path_type const& key_path)
+ {
+ return nudb::verify(
+ dat_path, key_path, BufferSize);
+ }
+
+ template <class Function>
+ static
+ bool
+ visit(
+ path_type const& path,
+ Function&& f)
+ {
+ return nudb::visit(
+ path, BufferSize, f);
+ }
+};
+
+} // nudb
+} // beast
+
+#endif
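For orientation, a hedged usage sketch of the `api<>` wrapper above. `xxhasher` is referenced elsewhere in this patch, but the codec name and all paths and parameters below are assumptions for illustration only:

```cpp
// Assumed names: a pass-through codec called `identity` plus xxhasher;
// substitute whatever the surrounding code actually provides.
using db = beast::nudb::api<beast::nudb::xxhasher, beast::nudb::identity>;

bool const created = db::create (
    "test.dat", "test.key", "test.log",  // illustrative paths
    1,                                   // appnum
    beast::nudb::make_salt(),            // salt (defined in create.h below)
    32,                                  // key size in bytes
    beast::nudb::block_size ("."),       // best-guess block size
    0.5f);                               // load factor
```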
diff --git a/src/beast/beast/nudb/error.h b/src/beast/beast/nudb/common.h
similarity index 67%
rename from src/beast/beast/nudb/error.h
rename to src/beast/beast/nudb/common.h
index a6f1915425..12a1345874 100644
--- a/src/beast/beast/nudb/error.h
+++ b/src/beast/beast/nudb/common.h
@@ -17,32 +17,48 @@
*/
//==============================================================================
-#ifndef BEAST_NUDB_ERROR_H_INCLUDED
-#define BEAST_NUDB_ERROR_H_INCLUDED
+#ifndef BEAST_NUDB_COMMON_H_INCLUDED
+#define BEAST_NUDB_COMMON_H_INCLUDED
-#include
-#include
#include
#include
namespace beast {
namespace nudb {
+// Commonly used types
+
+enum class file_mode
+{
+ scan, // read sequential
+ read, // read random
+ append, // read random, write append
+ write // read random, write random
+};
+
+using path_type = std::string;
+
// All exceptions thrown by nudb are derived
-// from std::exception except for fail_error
+// from std::runtime_error except for fail_error
+
+/** Thrown when a codec fails, e.g. corrupt data. */
+struct codec_error : std::runtime_error
+{
+ template <class String>
+ explicit
+ codec_error (String const& s)
+ : runtime_error(s)
+ {
+ }
+};
/** Base class for all errors thrown by file classes. */
struct file_error : std::runtime_error
{
+ template <class String>
explicit
- file_error (char const* s)
- : std::runtime_error(s)
- {
- }
-
- explicit
- file_error (std::string const& s)
- : std::runtime_error(s)
+ file_error (String const& s)
+ : runtime_error(s)
{
}
};
@@ -67,21 +83,24 @@ struct file_short_write_error : file_error
}
};
+/** Thrown when end of istream reached while reading. */
+struct short_read_error : std::runtime_error
+{
+ short_read_error()
+ : std::runtime_error(
+ "nudb: short read")
+ {
+ }
+};
+
/** Base class for all exceptions thrown by store. */
class store_error : public std::runtime_error
{
public:
+ template <class String>
explicit
- store_error (char const* m)
- : std::runtime_error(
- std::string("nudb: ") + m)
- {
- }
-
- explicit
- store_error (std::string const& m)
- : std::runtime_error(
- std::string("nudb: ") + m)
+ store_error (String const& s)
+ : runtime_error(s)
{
}
};
@@ -90,15 +109,10 @@ public:
class store_corrupt_error : public store_error
{
public:
+ template <class String>
explicit
- store_corrupt_error (char const* m)
- : store_error (m)
- {
- }
-
- explicit
- store_corrupt_error (std::string const& m)
- : store_error (m)
+ store_corrupt_error (String const& s)
+ : store_error(s)
{
}
};
diff --git a/src/beast/beast/nudb/create.h b/src/beast/beast/nudb/create.h
index c7ccd37eb0..30f3a700e9 100644
--- a/src/beast/beast/nudb/create.h
+++ b/src/beast/beast/nudb/create.h
@@ -22,16 +22,49 @@
#include
#include
-#include
#include
#include
#include
+#include
#include
#include
namespace beast {
namespace nudb {
+namespace detail {
+
+template <class = void>
+std::uint64_t
+make_uid()
+{
+ std::random_device rng;
+ std::mt19937_64 gen {rng()};
+ std::uniform_int_distribution<std::uint64_t> dist;
+ return dist(gen);
+}
+
+}
+
+/** Generate a random salt. */
+template <class = void>
+std::uint64_t
+make_salt()
+{
+ std::random_device rng;
+ std::mt19937_64 gen {rng()};
+ std::uniform_int_distribution<std::uint64_t> dist;
+ return dist(gen);
+}
+
+/** Returns the best guess at the volume's block size. */
+inline
+std::size_t
+block_size (path_type const& /*path*/)
+{
+ return 4096;
+}
+
/** Create a new database.
Preconditions:
The files must not exist
@@ -40,7 +73,12 @@ namespace nudb {
@param args Arguments passed to File constructors
@return `false` if any file could not be created.
*/
-template
+template <
+ class Hasher,
+ class Codec,
+ class File,
+ class... Args
+>
bool
create (
path_type const& dat_path,
@@ -50,10 +88,10 @@ create (
std::uint64_t salt,
std::size_t key_size,
std::size_t block_size,
- float load_factor)
+ float load_factor,
+ Args&&... args)
{
using namespace detail;
- using File = native_file;
if (key_size < 1)
throw std::domain_error(
"invalid key size");
@@ -67,43 +105,41 @@ create (
throw std::domain_error(
"nudb: load factor too large");
auto const capacity =
- bucket_capacity(key_size, block_size);
+ bucket_capacity(block_size);
if (capacity < 1)
throw std::domain_error(
"nudb: block size too small");
- File df;
- File kf;
- File lf;
- for(;;)
+ File df(args...);
+ File kf(args...);
+ File lf(args...);
+ if (df.create(
+ file_mode::append, dat_path))
{
- if (df.create(
- file_mode::append, dat_path))
+ if (kf.create (
+ file_mode::append, key_path))
{
- if (kf.create (
- file_mode::append, key_path))
- {
- if (lf.create(
- file_mode::append, log_path))
- break;
- File::erase (dat_path);
- }
- File::erase (key_path);
+ if (lf.create(
+ file_mode::append, log_path))
+ goto success;
+ File::erase (dat_path);
}
- return false;
+ File::erase (key_path);
}
-
+ return false;
+success:
dat_file_header dh;
dh.version = currentVersion;
+ dh.uid = make_uid();
dh.appnum = appnum;
- dh.salt = salt;
dh.key_size = key_size;
key_file_header kh;
kh.version = currentVersion;
+ kh.uid = dh.uid;
kh.appnum = appnum;
+ kh.key_size = key_size;
kh.salt = salt;
kh.pepper = pepper(salt);
- kh.key_size = key_size;
kh.block_size = block_size;
// VFALCO Should it be 65536?
// How do we set the min?
@@ -113,8 +149,7 @@ create (
write (kf, kh);
buffer buf(block_size);
std::memset(buf.get(), 0, block_size);
- bucket b (key_size, block_size,
- buf.get(), empty);
+ bucket b (block_size, buf.get(), empty);
b.write (kf, block_size);
// VFALCO Leave log file empty?
df.sync();
diff --git a/src/beast/beast/nudb/detail/arena.h b/src/beast/beast/nudb/detail/arena.h
index 6bf2695520..fa8059cefd 100644
--- a/src/beast/beast/nudb/detail/arena.h
+++ b/src/beast/beast/nudb/detail/arena.h
@@ -20,7 +20,6 @@
#ifndef BEAST_NUDB_DETAIL_ARENA_H_INCLUDED
#define BEAST_NUDB_DETAIL_ARENA_H_INCLUDED
-#include
#include
#include
#include
diff --git a/src/beast/beast/nudb/detail/bucket.h b/src/beast/beast/nudb/detail/bucket.h
index f7f4db43df..097494c57d 100644
--- a/src/beast/beast/nudb/detail/bucket.h
+++ b/src/beast/beast/nudb/detail/bucket.h
@@ -20,12 +20,11 @@
#ifndef BEAST_NUDB_DETAIL_BUCKET_H_INCLUDED
#define BEAST_NUDB_DETAIL_BUCKET_H_INCLUDED
-#include
+#include
#include
-#include
#include
#include
-#include
+#include
#include
#include
@@ -33,21 +32,7 @@ namespace beast {
namespace nudb {
namespace detail {
-// Key, hash, and bucket calculations:
-
-// Returns the hash of a key given the salt
-//
-template
-inline
-typename Hasher::result_type
-hash (void const* key,
- std::size_t key_size, std::size_t salt)
-{
- Hasher h (salt);
- h.append (key, key_size);
- return static_cast<
- typename Hasher::result_type>(h);
-}
+// bucket calculations:
// Returns bucket index given hash, buckets, and modulus
//
@@ -62,30 +47,6 @@ bucket_index (std::size_t h,
return n;
}
-// Returns the bucket index of a key
-//
-template
-inline
-std::size_t
-bucket_index (void const* key, std::size_t key_size,
- std::size_t salt, std::size_t buckets,
- std::size_t modulus)
-{
- return bucket_index (hash
- (key, key_size, salt), buckets, modulus);
-}
-
-// Returns the bucket index of a key
-// given the key file header
-template
-inline
-std::size_t
-bucket_index (void const* key, key_file_header const& kh)
-{
- return bucket_index(key, kh.key_size,
- kh.salt, kh.buckets, kh.modulus);
-}
-
//------------------------------------------------------------------------------
// Tag for constructing empty buckets
@@ -97,9 +58,8 @@ template
class bucket_t
{
private:
- std::size_t key_size_; // Size of key in bytes
std::size_t block_size_; // Size of a key file block
- std::size_t count_; // Current key count
+ std::size_t size_; // Current key count
std::size_t spill_; // Offset of next spill record or 0
std::uint8_t* p_; // Pointer to the bucket blob
@@ -108,23 +68,15 @@ public:
{
std::size_t offset;
std::size_t size;
- void const* key;
+ std::size_t hash;
};
bucket_t (bucket_t const&) = default;
bucket_t& operator= (bucket_t const&) = default;
- bucket_t (std::size_t key_size,
- std::size_t block_size, void* p);
+ bucket_t (std::size_t block_size, void* p);
- bucket_t (std::size_t key_size,
- std::size_t block_size, void* p, empty_t);
-
- std::size_t
- key_size() const
- {
- return key_size_;
- }
+ bucket_t (std::size_t block_size, void* p, empty_t);
std::size_t
block_size() const
@@ -135,44 +87,46 @@ public:
std::size_t
compact_size() const
{
- return detail::compact_size(
- key_size_, count_);
+ return detail::bucket_size(size_);
}
bool
empty() const
{
- return count_ == 0;
+ return size_ == 0;
}
bool
full() const
{
- return count_ >= detail::bucket_capacity(
- key_size_, block_size_);
+ return size_ >=
+ detail::bucket_capacity(block_size_);
}
std::size_t
size() const
{
- return count_;
+ return size_;
}
// Returns offset of next spill record or 0
+ //
std::size_t
spill() const
{
return spill_;
}
- // Clear contents of the bucket
- void
- clear();
-
// Set offset of next spill record
+ //
void
spill (std::size_t offset);
+ // Clear contents of the bucket
+ //
+ void
+ clear();
+
// Returns the record for a key
// entry without bounds checking.
//
@@ -185,12 +139,15 @@ public:
return at(i);
}
- std::pair
- find (void const* key) const;
+ // Returns index of entry with prefix
+ // equal to or greater than the given prefix.
+ //
+ std::size_t
+ lower_bound (std::size_t h) const;
void
insert (std::size_t offset,
- std::size_t size, void const* key);
+ std::size_t size, std::size_t h);
// Erase an element by index
//
@@ -227,45 +184,31 @@ private:
// Update size and spill in the blob
void
update();
-
- std::pair
- lower_bound (void const* key) const;
};
//------------------------------------------------------------------------------
template
-bucket_t<_>::bucket_t (std::size_t key_size,
+bucket_t<_>::bucket_t (
std::size_t block_size, void* p)
- : key_size_ (key_size)
- , block_size_ (block_size)
+ : block_size_ (block_size)
, p_ (reinterpret_cast(p))
{
// Bucket Record
istream is(p_, block_size);
- detail::read(is, count_); // Count
+ detail::read(is, size_); // Count
detail::read(is, spill_); // Spill
}
template
-bucket_t<_>::bucket_t (std::size_t key_size,
+bucket_t<_>::bucket_t (
std::size_t block_size, void* p, empty_t)
- : key_size_ (key_size)
- , block_size_ (block_size)
- , count_ (0)
+ : block_size_ (block_size)
+ , size_ (0)
, spill_ (0)
, p_ (reinterpret_cast(p))
{
- update();
-}
-
-template
-void
-bucket_t<_>::clear()
-{
- count_ = 0;
- spill_ = 0;
- update();
+ clear();
}
template
@@ -276,6 +219,15 @@ bucket_t<_>::spill (std::size_t offset)
update();
}
+template <class _>
+void
+bucket_t<_>::clear()
+{
+ size_ = 0;
+ spill_ = 0;
+ std::memset(p_, 0, block_size_);
+}
+
template
auto
bucket_t<_>::at (std::size_t i) const ->
@@ -286,7 +238,7 @@ bucket_t<_>::at (std::size_t i) const ->
std::size_t const w =
 field<uint48_t>::size + // Offset
 field<uint48_t>::size + // Size
- key_size_; // Key
+ field<hash_t>::size; // Prefix
// Bucket Record
detail::istream is(p_ +
field::size + // Count
@@ -297,54 +249,80 @@ bucket_t<_>::at (std::size_t i) const ->
is, result.offset); // Offset
detail::read(
is, result.size); // Size
- result.key = is.data(key_size_); // Key
+ detail::read<hash_t>(
+ is, result.hash); // Hash
return result;
}
template
-auto
-bucket_t<_>::find (void const* key) const ->
- std::pair
+std::size_t
+bucket_t<_>::lower_bound (
+ std::size_t h) const
{
- std::pair result;
- std::size_t i;
- std::tie(i, result.second) = lower_bound(key);
- if (result.second)
- result.first = at(i);
- return result;
+ // Bucket Entry
+ auto const w =
+ field<uint48_t>::size + // Offset
+ field<uint48_t>::size + // Size
+ field<hash_t>::size; // Hash
+ // Bucket Record
+ auto const p = p_ +
+ field<std::uint16_t>::size + // Count
+ field<uint48_t>::size + // Spill
+ // Bucket Entry
+ field<uint48_t>::size + // Offset
+ field<uint48_t>::size; // Size
+ std::size_t step;
+ std::size_t first = 0;
+ std::size_t count = size_;
+ while (count > 0)
+ {
+ step = count / 2;
+ auto const i = first + step;
+ std::size_t h1;
+ readp<hash_t>(p + i * w, h1);
+ if (h1 < h)
+ {
+ first = i + 1;
+ count -= step + 1;
+ }
+ else
+ {
+ count = step;
+ }
+ }
+ return first;
}
template
void
bucket_t<_>::insert (std::size_t offset,
- std::size_t size, void const* key)
+ std::size_t size, std::size_t h)
{
- bool found;
- std::size_t i;
- std::tie(i, found) = lower_bound(key);
- (void)found;
- assert(! found);
+ std::size_t i = lower_bound(h);
// Bucket Record
auto const p = p_ +
- field::size + // Count
- field::size; // Spill
+ field<
+ std::uint16_t>::size + // Count
+ field<uint48_t>::size; // Spill
// Bucket Entry
std::size_t const w =
- field::size + // Offset
- field::size + // Size
- key_size_; // Key
+ field<uint48_t>::size + // Offset
+ field<uint48_t>::size + // Size
+ field<hash_t>::size; // Hash
std::memmove (
p + (i + 1) * w,
p + i * w,
- (count_ - i) * w);
- count_++;
+ (size_ - i) * w);
+ size_++;
update();
// Bucket Entry
ostream os (p + i * w, w);
- detail::write(os, offset); // Offset
- detail::write(os, size); // Size
- std::memcpy (os.data(key_size_),
- key, key_size_); // Key
+ detail::write<uint48_t>(
+ os, offset); // Offset
+ detail::write<uint48_t>(
+ os, size); // Size
+ detail::write<hash_t>(
+ os, h); // Prefix
}
template
@@ -353,18 +331,20 @@ bucket_t<_>::erase (std::size_t i)
{
// Bucket Record
auto const p = p_ +
- field::size + // Count
- field::size; // Spill
+ field<
+ std::uint16_t>::size + // Count
+ field<uint48_t>::size; // Spill
auto const w =
- field::size + // Offset
- field::size + // Size
- key_size_; // Key
- --count_;
- if (i != count_)
+ field<uint48_t>::size + // Offset
+ field<uint48_t>::size + // Size
+ field<hash_t>::size; // Hash
+ --size_;
+ if (i < size_)
std::memmove(
- p + i * w,
- p + (i + 1) * w,
- (count_ - i) * w);
+ p + i * w,
+ p + (i + 1) * w,
+ (size_ - i) * w);
+ std::memset(p + size_ * w, 0, w);
update();
}
@@ -374,17 +354,15 @@ void
bucket_t<_>::read (File& f, std::size_t offset)
{
auto const cap = bucket_capacity (
- key_size_, block_size_);
+ block_size_);
// Excludes padding to block size
- f.read (offset, p_, bucket_size(
- key_size_, bucket_capacity(
- key_size_, block_size_)));
+ f.read (offset, p_, bucket_size(cap));
istream is(p_, block_size_);
detail::read<
- std::uint16_t>(is, count_); // Count
+ std::uint16_t>(is, size_); // Count
detail::read<
uint48_t>(is, spill_); // Spill
- if (count_ > cap)
+ if (size_ > cap)
throw store_corrupt_error(
"bad bucket size");
}
@@ -399,19 +377,21 @@ bucket_t<_>::read (bulk_reader& r)
detail::field::size +
detail::field::size);
detail::read<
- std::uint16_t>(is, count_); // Count
- detail::read(is, spill_); // Spill
+ std::uint16_t>(is, size_); // Count
+ detail::read<uint48_t>(
+ is, spill_); // Spill
update();
// Excludes empty bucket entries
- auto const w = count_ * (
- field::size + // Offset
- field::size + // Size
- key_size_); // Key
+ auto const w = size_ * (
+ field<uint48_t>::size + // Offset
+ field<uint48_t>::size + // Size
+ field<hash_t>::size); // Hash
is = r.prepare (w);
std::memcpy(p_ +
- field::size + // Count
- field::size, // Spill
- is.data(w), w); // Entries
+ field<
+ std::uint16_t>::size + // Count
+ field<uint48_t>::size, // Spill
+ is.data(w), w); // Entries
}
template
@@ -447,56 +427,40 @@ bucket_t<_>::update()
// Bucket Record
ostream os(p_, block_size_);
detail::write<
- std::uint16_t>(os, count_); // Count
+ std::uint16_t>(os, size_); // Count
detail::write<
uint48_t>(os, spill_); // Spill
}
-// bool is true if key matches index
-template
-std::pair
-bucket_t<_>::lower_bound (
- void const* key) const
-{
- // Bucket Entry
- auto const w =
- field::size + // Offset
- field::size + // Size
- key_size_; // Key
- // Bucket Record
- auto const p = p_ +
- field::size + // Count
- field::size + // Spill
- // Bucket Entry
- field::size + // Offset
- field::size; // Size
- std::size_t step;
- std::size_t first = 0;
- std::size_t count = count_;
- while (count > 0)
- {
- step = count / 2;
- auto const i = first + step;
- auto const c = std::memcmp (
- p + i * w, key, key_size_);
- if (c < 0)
- {
- first = i + 1;
- count -= step + 1;
- }
- else if (c > 0)
- {
- count = step;
- }
- else
- {
- return std::make_pair (i, true);
- }
- }
- return std::make_pair (first, false);
-}
using bucket = bucket_t<>;
+// Spill bucket if full.
+// The bucket is cleared after it spills.
+//
+template <class File>
+void
+maybe_spill(bucket& b, bulk_writer<File>& w)
+{
+ if (b.full())
+ {
+ // Spill Record
+ auto const offset = w.offset();
+ auto os = w.prepare(
+ field<uint48_t>::size + // Zero
+ field<std::uint16_t>::size + // Size
+ b.compact_size());
+ write<uint48_t> (os, 0); // Zero
+ write<std::uint16_t> (
+ os, b.compact_size()); // Size
+ auto const spill =
+ offset + os.size();
+ b.write (os); // Bucket
+ // Update bucket
+ b.clear();
+ b.spill (spill);
+ }
+}
+
} // detail
} // nudb
} // beast
diff --git a/src/beast/beast/nudb/detail/buffer.h b/src/beast/beast/nudb/detail/buffer.h
new file mode 100644
index 0000000000..2c71912ece
--- /dev/null
+++ b/src/beast/beast/nudb/detail/buffer.h
@@ -0,0 +1,99 @@
+//------------------------------------------------------------------------------
+/*
+ This file is part of Beast: https://github.com/vinniefalco/Beast
+ Copyright 2014, Vinnie Falco
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+//==============================================================================
+
+#ifndef BEAST_NUDB_DETAIL_BUFFER_H_INCLUDED
+#define BEAST_NUDB_DETAIL_BUFFER_H_INCLUDED
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+
+namespace beast {
+namespace nudb {
+namespace detail {
+
+// Simple growable memory buffer
+class buffer
+{
+private:
+ std::size_t size_ = 0;
+ std::unique_ptr<std::uint8_t[]> buf_;
+
+public:
+ ~buffer() = default;
+ buffer() = default;
+ buffer (buffer const&) = delete;
+ buffer& operator= (buffer const&) = delete;
+
+ explicit
+ buffer (std::size_t n)
+ : size_ (n)
+ , buf_ (new std::uint8_t[n])
+ {
+ }
+
+ buffer (buffer&& other)
+ : size_ (other.size_)
+ , buf_ (std::move(other.buf_))
+ {
+ other.size_ = 0;
+ }
+
+ buffer& operator= (buffer&& other)
+ {
+ size_ = other.size_;
+ buf_ = std::move(other.buf_);
+ other.size_ = 0;
+ return *this;
+ }
+
+ std::size_t
+ size() const
+ {
+ return size_;
+ }
+
+ std::uint8_t*
+ get() const
+ {
+ return buf_.get();
+ }
+
+ void
+ reserve (std::size_t n)
+ {
+ if (size_ < n)
+ buf_.reset (new std::uint8_t[n]);
+ size_ = n;
+ }
+
+ // BufferFactory
+ void*
+ operator() (std::size_t n)
+ {
+ reserve(n);
+ return buf_.get();
+ }
+};
+
+} // detail
+} // nudb
+} // beast
+
+#endif
diff --git a/src/beast/beast/nudb/detail/buffers.h b/src/beast/beast/nudb/detail/buffers.h
deleted file mode 100644
index cd2cd40de2..0000000000
--- a/src/beast/beast/nudb/detail/buffers.h
+++ /dev/null
@@ -1,147 +0,0 @@
-//------------------------------------------------------------------------------
-/*
- This file is part of Beast: https://github.com/vinniefalco/Beast
- Copyright 2014, Vinnie Falco
-
- Permission to use, copy, modify, and/or distribute this software for any
- purpose with or without fee is hereby granted, provided that the above
- copyright notice and this permission notice appear in all copies.
-
- THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#ifndef BEAST_NUDB_DETAIL_BUFFERS_H_INCLUDED
-#define BEAST_NUDB_DETAIL_BUFFERS_H_INCLUDED
-
-#include
-#include
-#include
-#include
-
-namespace beast {
-namespace nudb {
-namespace detail {
-
-// Thread safe pool of temp buffers,
-// to avoid needless calls to malloc.
-template
-class buffers_t
-{
-private:
- struct element
- {
- element* next;
- };
-
- std::size_t const block_size_;
- std::mutex m_;
- element* h_ = nullptr;
-
-public:
- class value_type
- {
- private:
- buffers_t& b_;
- element* e_;
-
- public:
- value_type (value_type const&) = delete;
- value_type& operator= (value_type const&) = delete;
-
- explicit
- value_type (buffers_t& b)
- : b_ (b)
- , e_ (b.acquire())
- {
- }
-
- ~value_type()
- {
- b_.release(e_);
- }
-
- std::uint8_t*
- get() const
- {
- return const_cast (
- reinterpret_cast<
- std::uint8_t const*>(e_ + 1));
- }
- };
-
- explicit
- buffers_t (std::size_t block_size);
-
- ~buffers_t();
-
-private:
- element*
- acquire();
-
- void
- release (element* e);
-};
-
-template
-buffers_t<_>::buffers_t (std::size_t block_size)
- : block_size_ (block_size)
- , h_ (nullptr)
-{
-}
-
-template
-buffers_t<_>::~buffers_t()
-{
- for (element* e = h_; e;)
- {
- element* const next = e->next;
- e->~element();
- delete[] reinterpret_cast<
- std::uint8_t*>(e);
- e = next;
- }
-}
-
-template
-auto
-buffers_t<_>::acquire() ->
- element*
-{
- {
- std::lock_guard m(m_);
- element* e = h_;
- if (e)
- {
- h_ = e->next;
- return e;
- }
- }
- return ::new(
- new std::uint8_t[
- sizeof(element) + block_size_]
- ) element;
-}
-
-template
-void
-buffers_t<_>::release (element* e)
-{
- std::lock_guard m(m_);
- e->next = h_;
- h_ = e;
-}
-
-using buffers = buffers_t<>;
-
-} // detail
-} // nudb
-} // beast
-
-#endif
diff --git a/src/beast/beast/nudb/detail/bulkio.h b/src/beast/beast/nudb/detail/bulkio.h
index 92ea8219a9..f2c252fdc2 100644
--- a/src/beast/beast/nudb/detail/bulkio.h
+++ b/src/beast/beast/nudb/detail/bulkio.h
@@ -20,7 +20,7 @@
#ifndef BEAST_NUDB_DETAIL_BULKIO_H_INCLUDED
#define BEAST_NUDB_DETAIL_BULKIO_H_INCLUDED
-#include
+#include
#include
#include
#include
@@ -45,10 +45,16 @@ public:
bulk_reader (File& f, std::size_t offset,
std::size_t last, std::size_t buffer_size);
+ std::size_t
+ offset() const
+ {
+ return offset_ - avail_;
+ }
+
bool
eof() const
{
- return offset_ - avail_ == last_;
+ return offset() >= last_;
}
istream
diff --git a/src/beast/beast/nudb/detail/cache.h b/src/beast/beast/nudb/detail/cache.h
index d619d3b23a..5c97ffad81 100644
--- a/src/beast/beast/nudb/detail/cache.h
+++ b/src/beast/beast/nudb/detail/cache.h
@@ -22,7 +22,6 @@
#include
#include
-#include
#include
#include
#include
@@ -77,8 +76,8 @@ private:
operator() (argument_type const& e) const
{
return std::make_pair(e.first,
- bucket (cache_->key_size_,
- cache_->block_size_, e.second));
+ bucket (cache_->block_size_,
+ e.second));
}
};
@@ -209,7 +208,7 @@ cache_t<_>::create (std::size_t n)
{
auto const p = arena_.alloc (block_size_);
map_.emplace (n, p);
- return bucket (key_size_, block_size_,
+ return bucket (block_size_,
p, detail::empty);
}
diff --git a/src/beast/beast/nudb/detail/config.h b/src/beast/beast/nudb/detail/config.h
deleted file mode 100644
index 0a40832caf..0000000000
--- a/src/beast/beast/nudb/detail/config.h
+++ /dev/null
@@ -1,75 +0,0 @@
-//------------------------------------------------------------------------------
-/*
- This file is part of Beast: https://github.com/vinniefalco/Beast
- Copyright 2014, Vinnie Falco
-
- Permission to use, copy, modify, and/or distribute this software for any
- purpose with or without fee is hereby granted, provided that the above
- copyright notice and this permission notice appear in all copies.
-
- THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#ifndef BEAST_NUDB_DETAIL_CONFIG_H_INCLUDED
-#define BEAST_NUDB_DETAIL_CONFIG_H_INCLUDED
-
-#include
-
-// Compiles out domain checks
-#ifndef BEAST_NUDB_NO_DOMAIN_CHECK
-# ifdef NDEBUG
-# define BEAST_NUDB_NO_DOMAIN_CHECK 1
-# else
-# define BEAST_NUDB_NO_DOMAIN_CHECK 0
-# endif
-#endif
-
-namespace beast {
-namespace nudb {
-
-// xxhasher is the fastest and the best choice
-// when keys are already uniformly distributed
-using default_hash = xxhasher;
-
-namespace detail {
-
-// Returns the closest power of 2 not less than x
-template
-std::size_t
-ceil_pow2 (unsigned long long x)
-{
- static const unsigned long long t[6] = {
- 0xFFFFFFFF00000000ull,
- 0x00000000FFFF0000ull,
- 0x000000000000FF00ull,
- 0x00000000000000F0ull,
- 0x000000000000000Cull,
- 0x0000000000000002ull
- };
-
- int y = (((x & (x - 1)) == 0) ? 0 : 1);
- int j = 32;
- int i;
-
- for(i = 0; i < 6; i++) {
- int k = (((x & t[i]) == 0) ? 0 : j);
- y += k;
- x >>= k;
- j >>= 1;
- }
-
- return std::size_t(1)<
#include
#include // for BEAST_CONSTEXPR
#include
@@ -85,16 +84,26 @@ struct field
static std::size_t BEAST_CONSTEXPR max = 0xffffffffffffffff;
};
-// read field from istream
+// read field from memory
+
+template <class T, class U, std::enable_if_t<
+ std::is_same<T, std::uint8_t>::value>* = nullptr>
+void
+readp (void const* v, U& u)
+{
+ std::uint8_t const* p =
+ reinterpret_cast<std::uint8_t const*>(v);
+ u = *p;
+}
template ::value>* = nullptr>
void
-read (istream& is, U& u)
+readp (void const* v, U& u)
{
- T t;
std::uint8_t const* p =
- is.data(field::size);
+ reinterpret_cast<std::uint8_t const*>(v);
+ T t;
t = T(*p++)<< 8;
t = T(*p ) | t;
u = t;
@@ -103,25 +112,25 @@ read (istream& is, U& u)
template ::value>* = nullptr>
void
-read (istream& is, U& u)
+readp (void const* v, U& u)
{
- T t;
std::uint8_t const* p =
- is.data(field::size);
- t = (T(*p++)<<16) | t;
- t = (T(*p++)<< 8) | t;
- t = T(*p ) | t;
+ reinterpret_cast<std::uint8_t const*>(v);
+ std::uint32_t t;
+ t = std::uint32_t(*p++)<<16;
+ t = (std::uint32_t(*p++)<< 8) | t;
+ t = std::uint32_t(*p ) | t;
u = t;
}
template ::value>* = nullptr>
void
-read (istream& is, U& u)
+readp (void const* v, U& u)
{
- T t;
std::uint8_t const* p =
- is.data(field::size);
+ reinterpret_cast<std::uint8_t const*>(v);
+ T t;
t = T(*p++)<<24;
t = (T(*p++)<<16) | t;
t = (T(*p++)<< 8) | t;
@@ -132,11 +141,11 @@ read (istream& is, U& u)
template ::value>* = nullptr>
void
-read (istream& is, U& u)
+readp (void const* v, U& u)
{
- std::uint64_t t;
std::uint8_t const* p =
- is.data(field::size);
+ reinterpret_cast<std::uint8_t const*>(v);
+ std::uint64_t t;
t = (std::uint64_t(*p++)<<40);
t = (std::uint64_t(*p++)<<32) | t;
t = (std::uint64_t(*p++)<<24) | t;
@@ -149,11 +158,11 @@ read (istream& is, U& u)
template ::value>* = nullptr>
void
-read (istream& is, U& u)
+readp (void const* v, U& u)
{
- T t;
std::uint8_t const* p =
- is.data(field::size);
+ reinterpret_cast<std::uint8_t const*>(v);
+ T t;
t = T(*p++)<<56;
t = (T(*p++)<<48) | t;
t = (T(*p++)<<40) | t;
@@ -165,18 +174,32 @@ read (istream& is, U& u)
u = t;
}
+// read field from istream
+
+template <class T, class U>
+void
+read (istream& is, U& u)
+{
+ readp<T>(is.data(field<T>::size), u);
+}
+
// write field to ostream
+template <class T, class U, std::enable_if_t<
+ std::is_same<T, std::uint8_t>::value>* = nullptr>
+void
+write (ostream& os, U const& u)
+{
+ std::uint8_t* p =
+ os.data(field<T>::size);
+ *p = u;
+}
+
template ::value>* = nullptr>
void
write (ostream& os, U const& u)
{
-#ifndef BEAST_NUDB_NO_DOMAIN_CHECK
- if (u > field::max)
- throw std::logic_error(
- "nudb: field max exceeded");
-#endif
T t = u;
std::uint8_t* p =
os.data(field::size);
@@ -184,16 +207,11 @@ write (ostream& os, U const& u)
*p = t &0xff;
}
-template ::value>* = nullptr>
void
write (ostream& os, U const& u)
{
-#ifndef BEAST_NUDB_NO_DOMAIN_CHECK
- if (u > field::max)
- throw std::logic_error(
- "nudb: field max exceeded");
-#endif
T t = u;
std::uint8_t* p =
os.data(field::size);
@@ -202,16 +220,11 @@ write (ostream& os, U const& u)
*p = t &0xff;
}
-template ::value>* = nullptr>
void
write (ostream& os, U const& u)
{
-#ifndef BEAST_NUDB_NO_DOMAIN_CHECK
- if (u > field::max)
- throw std::logic_error(
- "nudb: field max exceeded");
-#endif
T t = u;
std::uint8_t* p =
os.data(field::size);
@@ -221,16 +234,11 @@ write (ostream& os, U const& u)
*p = t &0xff;
}
-template ::value>* = nullptr>
void
write (ostream& os, U const& u)
{
-#ifndef BEAST_NUDB_NO_DOMAIN_CHECK
- if (u > field::max)
- throw std::logic_error(
- "nudb: field max exceeded");
-#endif
std::uint64_t const t = u;
std::uint8_t* p =
os.data(field::size);
@@ -242,16 +250,11 @@ write (ostream& os, U const& u)
*p = t &0xff;
}
-template ::value>* = nullptr>
void
write (ostream& os, U const& u)
{
-#ifndef BEAST_NUDB_NO_DOMAIN_CHECK
- if (u > field::max)
- throw std::logic_error(
- "nudb: field max exceeded");
-#endif
T t = u;
std::uint8_t* p =
os.data(field::size);
diff --git a/src/beast/beast/nudb/detail/format.h b/src/beast/beast/nudb/detail/format.h
index 512307f029..0d1179d3b1 100644
--- a/src/beast/beast/nudb/detail/format.h
+++ b/src/beast/beast/nudb/detail/format.h
@@ -20,7 +20,7 @@
#ifndef BEAST_NUDB_DETAIL_FORMAT_H_INCLUDED
#define BEAST_NUDB_DETAIL_FORMAT_H_INCLUDED
-#include
+#include
#include
#include
#include // for BEAST_CONSTEXPR
@@ -38,22 +38,23 @@ namespace detail {
// Format of the nudb files:
-static std::size_t BEAST_CONSTEXPR currentVersion = 1;
+static std::size_t BEAST_CONSTEXPR currentVersion = 2;
struct dat_file_header
{
static std::size_t BEAST_CONSTEXPR size =
8 + // Type
2 + // Version
+ 8 + // UID
8 + // Appnum
- 8 + // Salt
2 + // KeySize
+
64; // (Reserved)
char type[8];
std::size_t version;
+ std::uint64_t uid;
std::uint64_t appnum;
- std::uint64_t salt;
std::size_t key_size;
};
@@ -62,20 +63,25 @@ struct key_file_header
static std::size_t BEAST_CONSTEXPR size =
8 + // Type
2 + // Version
+ 8 + // UID
8 + // Appnum
+ 2 + // KeySize
+
8 + // Salt
8 + // Pepper
- 2 + // KeySize
2 + // BlockSize
2 + // LoadFactor
- 64; // (Reserved)
+
+ 56; // (Reserved)
char type[8];
std::size_t version;
+ std::uint64_t uid;
std::uint64_t appnum;
+ std::size_t key_size;
+
std::uint64_t salt;
std::uint64_t pepper;
- std::size_t key_size;
std::size_t block_size;
std::size_t load_factor;
@@ -91,23 +97,65 @@ struct log_file_header
static std::size_t BEAST_CONSTEXPR size =
8 + // Type
2 + // Version
+ 8 + // UID
8 + // Appnum
+ 2 + // KeySize
+
8 + // Salt
8 + // Pepper
- 2 + // KeySize
+ 2 + // BlockSize
+
8 + // KeyFileSize
8; // DataFileSize
char type[8];
std::size_t version;
+ std::uint64_t uid;
std::uint64_t appnum;
+ std::size_t key_size;
std::uint64_t salt;
std::uint64_t pepper;
- std::size_t key_size;
+ std::size_t block_size;
std::size_t key_file_size;
std::size_t dat_file_size;
};
+// Type used to store hashes in buckets.
+// This can be smaller than the output
+// of the hash function.
+//
+using hash_t = uint48_t;
+
+static_assert(field<hash_t>::size <=
+ sizeof(std::size_t), "");
+
+template <class T>
+std::size_t
+make_hash (std::size_t h);
+
+template<>
+inline
+std::size_t
+make_hash<uint48_t>(std::size_t h)
+{
+ return (h>>16)&0xffffffffffff;
+}
+
+// Returns the hash of a key given the salt.
+// Note: The hash is expressed in hash_t units
+//
+template <class Hasher>
+inline
+std::size_t
+hash (void const* key,
+ std::size_t key_size, std::size_t salt)
+{
+ Hasher h (salt);
+ h.append (key, key_size);
+ return make_hash<hash_t>(static_cast<
+ typename Hasher::result_type>(h));
+}
+
// Computes pepper from salt
//
template
@@ -124,8 +172,7 @@ pepper (std::size_t salt)
//
template
std::size_t
-bucket_size (std::size_t key_size,
- std::size_t capacity)
+bucket_size (std::size_t capacity)
{
// Bucket Record
return
@@ -134,33 +181,14 @@ bucket_size (std::size_t key_size,
capacity * (
 field<uint48_t>::size + // Offset
 field<uint48_t>::size + // Size
- key_size); // Key
+ field<hash_t>::size); // Hash
}
-// Returns the size of a bucket large enough to
-// hold size keys of length key_size.
-//
-inline
-std::size_t
-compact_size(std::size_t key_size,
- std::size_t size)
-{
- // Bucket Record
- return
- field::size + // Size
- field::size + // Spill
- size * (
- field::size + // Offset
- field::size + // Size
- key_size); // Key
-}
-
-// Returns: number of keys that fit in a bucket
+// Returns the number of entries that fit in a bucket
//
template
std::size_t
-bucket_capacity (std::size_t key_size,
- std::size_t block_size)
+bucket_capacity (std::size_t block_size)
{
// Bucket Record
auto const size =
@@ -169,17 +197,18 @@ bucket_capacity (std::size_t key_size,
auto const entry_size =
 field<uint48_t>::size + // Offset
 field<uint48_t>::size + // Size
- key_size; // Key
+ field<hash_t>::size; // Hash
if (block_size < key_file_header::size ||
block_size < size)
return 0;
return (block_size - size) / entry_size;
}
-// returns the number of bytes occupied by a value record
+// Returns the number of bytes occupied by a value record
inline
std::size_t
-data_size (std::size_t size, std::size_t key_size)
+value_size (std::size_t size,
+ std::size_t key_size)
{
// Data Record
return
@@ -188,6 +217,34 @@ data_size (std::size_t size, std::size_t key_size)
size; // Data
}
+// Returns the closest power of 2 not less than x
+template <class = void>
+std::size_t
+ceil_pow2 (unsigned long long x)
+{
+ static const unsigned long long t[6] = {
+ 0xFFFFFFFF00000000ull,
+ 0x00000000FFFF0000ull,
+ 0x000000000000FF00ull,
+ 0x00000000000000F0ull,
+ 0x000000000000000Cull,
+ 0x0000000000000002ull
+ };
+
+ int y = (((x & (x - 1)) == 0) ? 0 : 1);
+ int j = 32;
+ int i;
+
+ for(i = 0; i < 6; i++) {
+ int k = (((x & t[i]) == 0) ? 0 : j);
+ y += k;
+ x >>= k;
+ j >>= 1;
+ }
+
+ return std::size_t(1)<