Update src/rocksdb2 to rocksdb-3.5.1:

Merge commit 'c168d54495d7d7b84639514f6443ad99b89ce996' into develop
This commit is contained in:
Vinnie Falco
2014-10-27 11:36:32 -07:00
195 changed files with 4433 additions and 9442 deletions

View File

@@ -1,4 +1,4 @@
NATIVE_JAVA_CLASSES = org.rocksdb.RocksDB org.rocksdb.Options org.rocksdb.WriteBatch org.rocksdb.WriteBatchInternal org.rocksdb.WriteBatchTest org.rocksdb.WriteOptions org.rocksdb.BackupableDB org.rocksdb.BackupableDBOptions org.rocksdb.Statistics org.rocksdb.RocksIterator org.rocksdb.VectorMemTableConfig org.rocksdb.SkipListMemTableConfig org.rocksdb.HashLinkedListMemTableConfig org.rocksdb.HashSkipListMemTableConfig org.rocksdb.PlainTableConfig org.rocksdb.BlockBasedTableConfig org.rocksdb.ReadOptions org.rocksdb.Filter org.rocksdb.BloomFilter org.rocksdb.RestoreOptions org.rocksdb.RestoreBackupableDB org.rocksdb.RocksEnv org.rocksdb.GenericRateLimiterConfig
NATIVE_JAVA_CLASSES = org.rocksdb.RocksDB org.rocksdb.Options org.rocksdb.WriteBatch org.rocksdb.WriteBatchInternal org.rocksdb.WriteBatchTest org.rocksdb.WriteOptions org.rocksdb.BackupableDB org.rocksdb.BackupableDBOptions org.rocksdb.Statistics org.rocksdb.RocksIterator org.rocksdb.VectorMemTableConfig org.rocksdb.SkipListMemTableConfig org.rocksdb.HashLinkedListMemTableConfig org.rocksdb.HashSkipListMemTableConfig org.rocksdb.PlainTableConfig org.rocksdb.BlockBasedTableConfig org.rocksdb.ReadOptions org.rocksdb.Filter org.rocksdb.BloomFilter org.rocksdb.RestoreOptions org.rocksdb.RestoreBackupableDB org.rocksdb.RocksEnv
NATIVE_INCLUDE = ./include
ROCKSDB_JAR = rocksdbjni.jar

View File

@@ -72,34 +72,14 @@ public class RocksDBSample {
assert(options.memTableFactoryName().equals("SkipListFactory"));
options.setTableFormatConfig(new PlainTableConfig());
// Plain-Table requires mmap read
options.setAllowMmapReads(true);
assert(options.tableFactoryName().equals("PlainTable"));
options.setRateLimiterConfig(new GenericRateLimiterConfig(10000000,
10000, 10));
options.setRateLimiterConfig(new GenericRateLimiterConfig(10000000));
BlockBasedTableConfig table_options = new BlockBasedTableConfig();
table_options.setBlockCacheSize(64 * SizeUnit.KB)
.setFilterBitsPerKey(10)
.setCacheNumShardBits(6)
.setBlockSizeDeviation(5)
.setBlockRestartInterval(10)
.setCacheIndexAndFilterBlocks(true)
.setHashIndexAllowCollision(false)
.setBlockCacheCompressedSize(64 * SizeUnit.KB)
.setBlockCacheCompressedNumShardBits(10);
.setCacheNumShardBits(6);
assert(table_options.blockCacheSize() == 64 * SizeUnit.KB);
assert(table_options.cacheNumShardBits() == 6);
assert(table_options.blockSizeDeviation() == 5);
assert(table_options.blockRestartInterval() == 10);
assert(table_options.cacheIndexAndFilterBlocks() == true);
assert(table_options.hashIndexAllowCollision() == false);
assert(table_options.blockCacheCompressedSize() == 64 * SizeUnit.KB);
assert(table_options.blockCacheCompressedNumShardBits() == 10);
options.setTableFormatConfig(table_options);
assert(options.tableFactoryName().equals("BlockBasedTable"));
@@ -108,8 +88,6 @@ public class RocksDBSample {
db.put("hello".getBytes(), "world".getBytes());
byte[] value = db.get("hello".getBytes());
assert("world".equals(new String(value)));
String str = db.getProperty("rocksdb.stats");
assert(str != null && str != "");
} catch (RocksDBException e) {
System.out.format("[ERROR] caught the unexpceted exception -- %s\n", e);
assert(db == null);
@@ -143,29 +121,6 @@ public class RocksDBSample {
System.out.println("");
}
// write batch test
WriteOptions writeOpt = new WriteOptions();
for (int i = 10; i <= 19; ++i) {
WriteBatch batch = new WriteBatch();
for (int j = 10; j <= 19; ++j) {
batch.put(String.format("%dx%d", i, j).getBytes(),
String.format("%d", i * j).getBytes());
}
db.write(writeOpt, batch);
batch.dispose();
}
for (int i = 10; i <= 19; ++i) {
for (int j = 10; j <= 19; ++j) {
assert(new String(
db.get(String.format("%dx%d", i, j).getBytes())).equals(
String.format("%d", i * j)));
System.out.format("%s ", new String(db.get(
String.format("%dx%d", i, j).getBytes())));
}
System.out.println("");
}
writeOpt.dispose();
value = db.get("1x1".getBytes());
assert(value != null);
value = db.get("world".getBytes());

View File

@@ -14,14 +14,11 @@ public class BlockBasedTableConfig extends TableFormatConfig {
public BlockBasedTableConfig() {
noBlockCache_ = false;
blockCacheSize_ = 8 * 1024 * 1024;
blockSize_ = 4 * 1024;
blockSizeDeviation_ = 10;
blockRestartInterval_ = 16;
blockSize_ = 4 * 1024;
blockSizeDeviation_ =10;
blockRestartInterval_ =16;
wholeKeyFiltering_ = true;
bitsPerKey_ = 10;
cacheIndexAndFilterBlocks_ = false;
hashIndexAllowCollision_ = true;
blockCacheCompressedSize_ = 0;
bitsPerKey_ = 0;
}
/**
@@ -74,8 +71,8 @@ public class BlockBasedTableConfig extends TableFormatConfig {
* number means use default settings."
* @return the reference to the current option.
*/
public BlockBasedTableConfig setCacheNumShardBits(int blockCacheNumShardBits) {
blockCacheNumShardBits_ = blockCacheNumShardBits;
public BlockBasedTableConfig setCacheNumShardBits(int numShardBits) {
numShardBits_ = numShardBits;
return this;
}
@@ -87,7 +84,7 @@ public class BlockBasedTableConfig extends TableFormatConfig {
* @return the number of shard bits used in the block cache.
*/
public int cacheNumShardBits() {
return blockCacheNumShardBits_;
return numShardBits_;
}
/**
@@ -189,135 +186,25 @@ public class BlockBasedTableConfig extends TableFormatConfig {
bitsPerKey_ = bitsPerKey;
return this;
}
/**
* Indicating if we'd put index/filter blocks to the block cache.
If not specified, each "table reader" object will pre-load index/filter
block during table initialization.
*
* @return if index and filter blocks should be put in block cache.
*/
public boolean cacheIndexAndFilterBlocks() {
return cacheIndexAndFilterBlocks_;
}
/**
* Indicating if we'd put index/filter blocks to the block cache.
If not specified, each "table reader" object will pre-load index/filter
block during table initialization.
*
* @param index and filter blocks should be put in block cache.
* @return the reference to the current config.
*/
public BlockBasedTableConfig setCacheIndexAndFilterBlocks(
boolean cacheIndexAndFilterBlocks) {
cacheIndexAndFilterBlocks_ = cacheIndexAndFilterBlocks;
return this;
}
/**
* Influence the behavior when kHashSearch is used.
if false, stores a precise prefix to block range mapping
if true, does not store prefix and allows prefix hash collision
(less memory consumption)
*
* @return if hash collisions should be allowed.
*/
public boolean hashIndexAllowCollision() {
return hashIndexAllowCollision_;
}
/**
* Influence the behavior when kHashSearch is used.
if false, stores a precise prefix to block range mapping
if true, does not store prefix and allows prefix hash collision
(less memory consumption)
*
* @param if hash collisions should be allowed.
* @return the reference to the current config.
*/
public BlockBasedTableConfig setHashIndexAllowCollision(
boolean hashIndexAllowCollision) {
hashIndexAllowCollision_ = hashIndexAllowCollision;
return this;
}
/**
* Size of compressed block cache. If 0, then block_cache_compressed is set
* to null.
*
* @return size of compressed block cache.
*/
public long blockCacheCompressedSize() {
return blockCacheCompressedSize_;
}
/**
* Size of compressed block cache. If 0, then block_cache_compressed is set
* to null.
*
* @param size of compressed block cache.
* @return the reference to the current config.
*/
public BlockBasedTableConfig setBlockCacheCompressedSize(
long blockCacheCompressedSize) {
blockCacheCompressedSize_ = blockCacheCompressedSize;
return this;
}
/**
* Controls the number of shards for the block compressed cache.
* This is applied only if blockCompressedCacheSize is set to non-negative.
*
* @return numShardBits the number of shard bits. The resulting
* number of shards would be 2 ^ numShardBits. Any negative
* number means use default settings.
*/
public int blockCacheCompressedNumShardBits() {
return blockCacheCompressedNumShardBits_;
}
/**
* Controls the number of shards for the block compressed cache.
* This is applied only if blockCompressedCacheSize is set to non-negative.
*
* @param numShardBits the number of shard bits. The resulting
* number of shards would be 2 ^ numShardBits. Any negative
* number means use default settings."
* @return the reference to the current option.
*/
public BlockBasedTableConfig setBlockCacheCompressedNumShardBits(
int blockCacheCompressedNumShardBits) {
blockCacheCompressedNumShardBits_ = blockCacheCompressedNumShardBits;
return this;
}
@Override protected long newTableFactoryHandle() {
return newTableFactoryHandle(noBlockCache_, blockCacheSize_,
blockCacheNumShardBits_, blockSize_, blockSizeDeviation_,
blockRestartInterval_, wholeKeyFiltering_, bitsPerKey_,
cacheIndexAndFilterBlocks_, hashIndexAllowCollision_,
blockCacheCompressedSize_, blockCacheCompressedNumShardBits_);
return newTableFactoryHandle(noBlockCache_, blockCacheSize_, numShardBits_,
blockSize_, blockSizeDeviation_, blockRestartInterval_,
wholeKeyFiltering_, bitsPerKey_);
}
private native long newTableFactoryHandle(
boolean noBlockCache, long blockCacheSize, int blockCacheNumShardBits,
boolean noBlockCache, long blockCacheSize, int numShardbits,
long blockSize, int blockSizeDeviation, int blockRestartInterval,
boolean wholeKeyFiltering, int bitsPerKey,
boolean cacheIndexAndFilterBlocks, boolean hashIndexAllowCollision,
long blockCacheCompressedSize, int blockCacheCompressedNumShardBits);
boolean wholeKeyFiltering, int bitsPerKey);
private boolean noBlockCache_;
private long blockCacheSize_;
private int blockCacheNumShardBits_;
private int numShardBits_;
private long shard;
private long blockSize_;
private int blockSizeDeviation_;
private int blockRestartInterval_;
private boolean wholeKeyFiltering_;
private int bitsPerKey_;
private boolean cacheIndexAndFilterBlocks_;
private boolean hashIndexAllowCollision_;
private long blockCacheCompressedSize_;
private int blockCacheCompressedNumShardBits_;
}

View File

@@ -1,36 +0,0 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* Config for rate limiter, which is used to control write rate of flush and
* compaction.
*/
public class GenericRateLimiterConfig extends RateLimiterConfig {
// Default refill interval: 100 milliseconds, expressed in microseconds.
private static final long DEFAULT_REFILL_PERIOD_MICROS = (100 * 1000);
// Default fairness parameter passed to the native rate limiter.
private static final int DEFAULT_FAIRNESS = 10;
/**
 * Construct a rate limiter config with explicit parameters.
 *
 * @param rateBytesPerSecond total write rate cap in bytes per second.
 * @param refillPeriodMicros token-refill interval in microseconds.
 * @param fairness fairness parameter forwarded to the native limiter.
 */
public GenericRateLimiterConfig(long rateBytesPerSecond,
long refillPeriodMicros, int fairness) {
rateBytesPerSecond_ = rateBytesPerSecond;
refillPeriodMicros_ = refillPeriodMicros;
fairness_ = fairness;
}
/**
 * Construct a rate limiter config with default refill period and fairness.
 *
 * @param rateBytesPerSecond total write rate cap in bytes per second.
 */
public GenericRateLimiterConfig(long rateBytesPerSecond) {
this(rateBytesPerSecond, DEFAULT_REFILL_PERIOD_MICROS, DEFAULT_FAIRNESS);
}
// Creates the native (C++) rate limiter and returns its handle as a long.
@Override protected long newRateLimiterHandle() {
return newRateLimiterHandle(rateBytesPerSecond_, refillPeriodMicros_,
fairness_);
}
private native long newRateLimiterHandle(long rateBytesPerSecond,
long refillPeriodMicros, int fairness);
private final long rateBytesPerSecond_;
private final long refillPeriodMicros_;
private final int fairness_;
}

View File

@@ -1,58 +0,0 @@
package org.rocksdb;
import java.io.*;
/**
* This class is used to load the RocksDB shared library from within the jar.
* The shared library is extracted to a temp folder and loaded from there.
*/
public class NativeLibraryLoader {
// Name of the shared library resource bundled inside the jar.
private static String sharedLibraryName = "librocksdbjni.so";
// Prefix/suffix used when extracting to an auto-generated temp file.
private static String tempFilePrefix = "librocksdbjni";
private static String tempFileSuffix = ".so";
/**
 * Extract the bundled native library to a temporary location and load it
 * with System.load().
 *
 * @param tmpDir target directory for the extracted library; if null or
 *               empty, a JVM-generated temp file is used instead.
 * @throws IOException if reading the jar resource or writing the temp
 *                     file fails.
 */
public static void loadLibraryFromJar(String tmpDir)
throws IOException {
File temp;
if(tmpDir == null || tmpDir.equals(""))
temp = File.createTempFile(tempFilePrefix, tempFileSuffix);
else
temp = new File(tmpDir + "/" + sharedLibraryName);
temp.deleteOnExit();
// NOTE(review): in the tmpDir branch the File is only a path object and
// is never created on disk before this check, so exists() would be false
// unless the file was already present — verify this is intentional.
if (!temp.exists()) {
throw new RuntimeException("File " + temp.getAbsolutePath() + " does not exist.");
}
byte[] buffer = new byte[102400];
int readBytes;
// Locate the shared library as a classpath resource inside the jar.
InputStream is = ClassLoader.getSystemClassLoader().getResourceAsStream(sharedLibraryName);
if (is == null) {
throw new RuntimeException(sharedLibraryName + " was not found inside JAR.");
}
OutputStream os = null;
try {
// Copy the resource stream to the temp file in 100 KB chunks.
os = new FileOutputStream(temp);
while ((readBytes = is.read(buffer)) != -1) {
os.write(buffer, 0, readBytes);
}
} finally {
// Close both streams even if the copy fails part-way.
if(os != null)
os.close();
if(is != null)
is.close();
}
// Load the freshly extracted library into the JVM by absolute path.
System.load(temp.getAbsolutePath());
}
/**
 * Private constructor to disallow instantiation
 */
private NativeLibraryLoader() {
}
}

View File

@@ -13,19 +13,8 @@ package org.rocksdb;
* native resources will be released as part of the process.
*/
public class Options extends RocksObject {
static {
RocksDB.loadLibrary();
}
static final long DEFAULT_CACHE_SIZE = 8 << 20;
static final int DEFAULT_NUM_SHARD_BITS = -1;
/**
* Builtin RocksDB comparators
*/
public enum BuiltinComparator {
BYTEWISE_COMPARATOR, REVERSE_BYTEWISE_COMPARATOR;
}
/**
* Construct options for opening a RocksDB.
*
@@ -86,21 +75,6 @@ public class Options extends RocksObject {
return createIfMissing(nativeHandle_);
}
/**
* Set BuiltinComparator to be used with RocksDB.
*
* Note: Comparator can be set once upon database creation.
*
* Default: BytewiseComparator.
* @param builtinComparator a BuiltinComparator type.
*/
public void setBuiltinComparator(BuiltinComparator builtinComparator) {
assert(isInitialized());
setBuiltinComparator(nativeHandle_, builtinComparator.ordinal());
}
private native void setBuiltinComparator(long handle, int builtinComparator);
/**
* Amount of data to build up in memory (backed by an unsorted log
* on disk) before converting to a sorted on-disk file.
@@ -331,6 +305,40 @@ public class Options extends RocksObject {
}
private native void setUseFsync(long handle, boolean useFsync);
/**
* The time interval in seconds between each two consecutive stats logs.
* This number controls how often a new scribe log about
* db deploy stats is written out.
* -1 indicates no logging at all.
*
* @return the time interval in seconds between each two consecutive
* stats logs.
*/
public int dbStatsLogInterval() {
assert(isInitialized());
return dbStatsLogInterval(nativeHandle_);
}
private native int dbStatsLogInterval(long handle);
/**
* The time interval in seconds between each two consecutive stats logs.
* This number controls how often a new scribe log about
* db deploy stats is written out.
* -1 indicates no logging at all.
* Default value is 1800 (half an hour).
*
* @param dbStatsLogInterval the time interval in seconds between each
* two consecutive stats logs.
* @return the reference to the current option.
*/
public Options setDbStatsLogInterval(int dbStatsLogInterval) {
assert(isInitialized());
setDbStatsLogInterval(nativeHandle_, dbStatsLogInterval);
return this;
}
private native void setDbStatsLogInterval(
long handle, int dbStatsLogInterval);
/**
* Returns the directory of info log.
*
@@ -1093,6 +1101,33 @@ public class Options extends RocksObject {
private native void setBytesPerSync(
long handle, long bytesPerSync);
/**
* Allow RocksDB to use thread local storage to optimize performance.
* Default: true
*
* @return true if thread-local storage is allowed
*/
public boolean allowThreadLocal() {
assert(isInitialized());
return allowThreadLocal(nativeHandle_);
}
private native boolean allowThreadLocal(long handle);
/**
* Allow RocksDB to use thread local storage to optimize performance.
* Default: true
*
* @param allowThreadLocal true if thread-local storage is allowed.
* @return the reference to the current option.
*/
public Options setAllowThreadLocal(boolean allowThreadLocal) {
assert(isInitialized());
setAllowThreadLocal(nativeHandle_, allowThreadLocal);
return this;
}
private native void setAllowThreadLocal(
long handle, boolean allowThreadLocal);
/**
* Set the config for mem-table.
*
@@ -1103,19 +1138,6 @@ public class Options extends RocksObject {
setMemTableFactory(nativeHandle_, config.newMemTableFactoryHandle());
return this;
}
/**
* Use to control write rate of flush and compaction. Flush has higher
* priority than compaction. Rate limiting is disabled if nullptr.
* Default: nullptr
*
* @param config rate limiter config.
* @return the instance of the current Options.
*/
public Options setRateLimiterConfig(RateLimiterConfig config) {
setRateLimiter(nativeHandle_, config.newRateLimiterHandle());
return this;
}
/**
* Returns the name of the current mem table representation.
@@ -2204,8 +2226,6 @@ public class Options extends RocksObject {
private native long statisticsPtr(long optHandle);
private native void setMemTableFactory(long handle, long factoryHandle);
private native void setRateLimiter(long handle,
long rateLimiterHandle);
private native String memTableFactoryName(long handle);
private native void setTableFactory(long handle, long factoryHandle);

View File

@@ -1,20 +0,0 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* Config for rate limiter, which is used to control write rate of flush and
* compaction.
*/
public abstract class RateLimiterConfig {
/**
 * This function should only be called by Options.setRateLimiter(),
 * which will create a c++ shared-pointer to the c++ RateLimiter
 * that is associated with the Java RateLimtierConifg.
 *
 * @return a handle (native pointer encoded as long) to the newly
 *         created native rate limiter.
 * @see Options.setRateLimiter()
 */
abstract protected long newRateLimiterHandle();
}

View File

@@ -11,7 +11,6 @@ import java.util.HashMap;
import java.io.Closeable;
import java.io.IOException;
import org.rocksdb.util.Environment;
import org.rocksdb.NativeLibraryLoader;
/**
* A RocksDB is a persistent ordered map from keys to values. It is safe for
@@ -24,19 +23,11 @@ public class RocksDB extends RocksObject {
private static final String[] compressionLibs_ = {
"snappy", "z", "bzip2", "lz4", "lz4hc"};
static {
RocksDB.loadLibrary();
}
/**
* Loads the necessary library files.
* Calling this method twice will have no effect.
* By default the method extracts the shared library for loading at
* java.io.tmpdir, however, you can override this temporary location by
* setting the environment variable ROCKSDB_SHAREDLIB_DIR.
*/
public static synchronized void loadLibrary() {
String tmpDir = System.getenv("ROCKSDB_SHAREDLIB_DIR");
// loading possibly necessary libraries.
for (String lib : compressionLibs_) {
try {
@@ -45,14 +36,8 @@ public class RocksDB extends RocksObject {
// since it may be optional, we ignore its loading failure here.
}
}
try
{
NativeLibraryLoader.loadLibraryFromJar(tmpDir);
}
catch (IOException e)
{
throw new RuntimeException("Unable to load the RocksDB shared library" + e);
}
// However, if any of them is required. We will see error here.
System.loadLibrary("rocksdbjni");
}
/**
@@ -324,26 +309,6 @@ public class RocksDB extends RocksObject {
throws RocksDBException {
remove(nativeHandle_, writeOpt.nativeHandle_, key, key.length);
}
/**
* DB implementations can export properties about their state
via this method. If "property" is a valid property understood by this
DB implementation, fills "*value" with its current value and returns
true. Otherwise returns false.
Valid property names include:
"rocksdb.num-files-at-level<N>" - return the number of files at level <N>,
where <N> is an ASCII representation of a level number (e.g. "0").
"rocksdb.stats" - returns a multi-line string that describes statistics
about the internal operation of the DB.
"rocksdb.sstables" - returns a multi-line string that describes all
of the sstables that make up the db contents.
*/
public String getProperty(String property) throws RocksDBException {
return getProperty0(nativeHandle_, property, property.length());
}
/**
* Return a heap-allocated iterator over the contents of the database.
@@ -398,8 +363,6 @@ public class RocksDB extends RocksObject {
protected native void remove(
long handle, long writeOptHandle,
byte[] key, int keyLen) throws RocksDBException;
protected native String getProperty0(long nativeHandle,
String property, int propertyLength) throws RocksDBException;
protected native long iterator0(long optHandle);
private native void disposeInternal(long handle);

View File

@@ -255,7 +255,7 @@ public class DbBenchmark {
for (long j = 0; j < entriesPerBatch_; j++) {
getKey(key, i + j, keyRange_);
DbBenchmark.this.gen_.generate(value);
batch.put(key, value);
db_.put(writeOpt_, key, value);
stats_.finishedSingleOp(keySize_ + valueSize_);
}
db_.write(writeOpt_, batch);

View File

@@ -52,6 +52,12 @@ public class OptionsTest {
assert(opt.useFsync() == boolValue);
}
{ // DbStatsLogInterval test
int intValue = rand.nextInt();
opt.setDbStatsLogInterval(intValue);
assert(opt.dbStatsLogInterval() == intValue);
}
{ // DbLogDir test
String str = "path/to/DbLogDir";
opt.setDbLogDir(str);
@@ -184,6 +190,12 @@ public class OptionsTest {
assert(opt.bytesPerSync() == longValue);
}
{ // AllowThreadLocal test
boolean boolValue = rand.nextBoolean();
opt.setAllowThreadLocal(boolValue);
assert(opt.allowThreadLocal() == boolValue);
}
{ // WriteBufferSize test
long longValue = rand.nextLong();
opt.setWriteBufferSize(longValue);

View File

@@ -5,7 +5,6 @@
//
// This file implements the "bridge" between Java and C++ for MemTables.
#include "rocksjni/portal.h"
#include "include/org_rocksdb_HashSkipListMemTableConfig.h"
#include "include/org_rocksdb_HashLinkedListMemTableConfig.h"
#include "include/org_rocksdb_VectorMemTableConfig.h"
@@ -21,7 +20,7 @@ jlong Java_org_rocksdb_HashSkipListMemTableConfig_newMemTableFactoryHandle(
JNIEnv* env, jobject jobj, jlong jbucket_count,
jint jheight, jint jbranching_factor) {
return reinterpret_cast<jlong>(rocksdb::NewHashSkipListRepFactory(
rocksdb::jlong_to_size_t(jbucket_count),
static_cast<size_t>(jbucket_count),
static_cast<int32_t>(jheight),
static_cast<int32_t>(jbranching_factor)));
}
@@ -34,7 +33,7 @@ jlong Java_org_rocksdb_HashSkipListMemTableConfig_newMemTableFactoryHandle(
jlong Java_org_rocksdb_HashLinkedListMemTableConfig_newMemTableFactoryHandle(
JNIEnv* env, jobject jobj, jlong jbucket_count) {
return reinterpret_cast<jlong>(rocksdb::NewHashLinkListRepFactory(
rocksdb::jlong_to_size_t(jbucket_count)));
static_cast<size_t>(jbucket_count)));
}
/*
@@ -45,7 +44,7 @@ jlong Java_org_rocksdb_HashLinkedListMemTableConfig_newMemTableFactoryHandle(
jlong Java_org_rocksdb_VectorMemTableConfig_newMemTableFactoryHandle(
JNIEnv* env, jobject jobj, jlong jreserved_size) {
return reinterpret_cast<jlong>(new rocksdb::VectorRepFactory(
rocksdb::jlong_to_size_t(jreserved_size)));
static_cast<size_t>(jreserved_size)));
}
/*

View File

@@ -21,8 +21,6 @@
#include "rocksdb/memtablerep.h"
#include "rocksdb/table.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/rate_limiter.h"
#include "rocksdb/comparator.h"
/*
* Class: org_rocksdb_Options
@@ -64,23 +62,6 @@ jboolean Java_org_rocksdb_Options_createIfMissing(
return reinterpret_cast<rocksdb::Options*>(jhandle)->create_if_missing;
}
/*
* Class: org_rocksdb_Options
* Method: useReverseBytewiseComparator
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setBuiltinComparator(
JNIEnv* env, jobject jobj, jlong jhandle, jint builtinComparator) {
switch (builtinComparator){
case 1:
reinterpret_cast<rocksdb::Options*>(jhandle)->comparator = rocksdb::ReverseBytewiseComparator();
break;
default:
reinterpret_cast<rocksdb::Options*>(jhandle)->comparator = rocksdb::BytewiseComparator();
break;
}
}
/*
* Class: org_rocksdb_Options
* Method: setWriteBufferSize
@@ -89,7 +70,7 @@ void Java_org_rocksdb_Options_setBuiltinComparator(
void Java_org_rocksdb_Options_setWriteBufferSize(
JNIEnv* env, jobject jobj, jlong jhandle, jlong jwrite_buffer_size) {
reinterpret_cast<rocksdb::Options*>(jhandle)->write_buffer_size =
rocksdb::jlong_to_size_t(jwrite_buffer_size);
static_cast<size_t>(jwrite_buffer_size);
}
@@ -381,7 +362,7 @@ jlong Java_org_rocksdb_Options_maxLogFileSize(
void Java_org_rocksdb_Options_setMaxLogFileSize(
JNIEnv* env, jobject jobj, jlong jhandle, jlong max_log_file_size) {
reinterpret_cast<rocksdb::Options*>(jhandle)->max_log_file_size =
rocksdb::jlong_to_size_t(max_log_file_size);
static_cast<size_t>(max_log_file_size);
}
/*
@@ -402,7 +383,7 @@ jlong Java_org_rocksdb_Options_logFileTimeToRoll(
void Java_org_rocksdb_Options_setLogFileTimeToRoll(
JNIEnv* env, jobject jobj, jlong jhandle, jlong log_file_time_to_roll) {
reinterpret_cast<rocksdb::Options*>(jhandle)->log_file_time_to_roll =
rocksdb::jlong_to_size_t(log_file_time_to_roll);
static_cast<size_t>(log_file_time_to_roll);
}
/*
@@ -423,7 +404,7 @@ jlong Java_org_rocksdb_Options_keepLogFileNum(
void Java_org_rocksdb_Options_setKeepLogFileNum(
JNIEnv* env, jobject jobj, jlong jhandle, jlong keep_log_file_num) {
reinterpret_cast<rocksdb::Options*>(jhandle)->keep_log_file_num =
rocksdb::jlong_to_size_t(keep_log_file_num);
static_cast<size_t>(keep_log_file_num);
}
/*
@@ -478,17 +459,6 @@ void Java_org_rocksdb_Options_setMemTableFactory(
reinterpret_cast<rocksdb::MemTableRepFactory*>(jfactory_handle));
}
/*
* Class: org_rocksdb_Options
* Method: setRateLimiter
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setRateLimiter(
JNIEnv* env, jobject jobj, jlong jhandle, jlong jrate_limiter_handle) {
reinterpret_cast<rocksdb::Options*>(jhandle)->rate_limiter.reset(
reinterpret_cast<rocksdb::RateLimiter*>(jrate_limiter_handle));
}
/*
* Class: org_rocksdb_Options
* Method: tableCacheNumshardbits
@@ -539,8 +509,7 @@ void Java_org_rocksdb_Options_setTableCacheRemoveScanCountLimit(
void Java_org_rocksdb_Options_useFixedLengthPrefixExtractor(
JNIEnv* env, jobject jobj, jlong jhandle, jint jprefix_length) {
reinterpret_cast<rocksdb::Options*>(jhandle)->prefix_extractor.reset(
rocksdb::NewFixedPrefixTransform(
rocksdb::jlong_to_size_t(jprefix_length)));
rocksdb::NewFixedPrefixTransform(static_cast<size_t>(jprefix_length)));
}
/*
@@ -604,7 +573,7 @@ jlong Java_org_rocksdb_Options_manifestPreallocationSize(
void Java_org_rocksdb_Options_setManifestPreallocationSize(
JNIEnv* env, jobject jobj, jlong jhandle, jlong preallocation_size) {
reinterpret_cast<rocksdb::Options*>(jhandle)->manifest_preallocation_size =
rocksdb::jlong_to_size_t(preallocation_size);
static_cast<size_t>(preallocation_size);
}
/*
@@ -807,6 +776,27 @@ void Java_org_rocksdb_Options_setBytesPerSync(
static_cast<int64_t>(bytes_per_sync);
}
/*
* Class: org_rocksdb_Options
* Method: allowThreadLocal
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_Options_allowThreadLocal(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->allow_thread_local;
}
/*
* Class: org_rocksdb_Options
* Method: setAllowThreadLocal
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setAllowThreadLocal(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean allow_thread_local) {
reinterpret_cast<rocksdb::Options*>(jhandle)->allow_thread_local =
static_cast<bool>(allow_thread_local);
}
/*
* Method: tableFactoryName
* Signature: (J)Ljava/lang/String
@@ -1255,7 +1245,7 @@ jlong Java_org_rocksdb_Options_arenaBlockSize(
void Java_org_rocksdb_Options_setArenaBlockSize(
JNIEnv* env, jobject jobj, jlong jhandle, jlong jarena_block_size) {
reinterpret_cast<rocksdb::Options*>(jhandle)->arena_block_size =
rocksdb::jlong_to_size_t(jarena_block_size);
static_cast<size_t>(jarena_block_size);
}
/*
@@ -1420,7 +1410,7 @@ void Java_org_rocksdb_Options_setInplaceUpdateNumLocks(
jlong jinplace_update_num_locks) {
reinterpret_cast<rocksdb::Options*>(
jhandle)->inplace_update_num_locks =
rocksdb::jlong_to_size_t(jinplace_update_num_locks);
static_cast<size_t>(jinplace_update_num_locks);
}
/*
@@ -1511,7 +1501,7 @@ void Java_org_rocksdb_Options_setMaxSuccessiveMerges(
JNIEnv* env, jobject jobj, jlong jhandle,
jlong jmax_successive_merges) {
reinterpret_cast<rocksdb::Options*>(jhandle)->max_successive_merges =
rocksdb::jlong_to_size_t(jmax_successive_merges);
static_cast<size_t>(jmax_successive_merges);
}
/*

View File

@@ -11,19 +11,12 @@
#define JAVA_ROCKSJNI_PORTAL_H_
#include <jni.h>
#include <limits>
#include "rocksdb/db.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/utilities/backupable_db.h"
namespace rocksdb {
inline size_t jlong_to_size_t(const jlong& jvalue) {
return static_cast<uint64_t>(jvalue) <=
static_cast<uint64_t>(std::numeric_limits<size_t>::max()) ?
static_cast<size_t>(jvalue) : std::numeric_limits<size_t>::max();
}
// The portal class for org.rocksdb.RocksDB
class RocksDBJni {
public:

View File

@@ -1,24 +0,0 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// This file implements the "bridge" between Java and C++ for RateLimiter.
#include "rocksjni/portal.h"
#include "include/org_rocksdb_GenericRateLimiterConfig.h"
#include "rocksdb/rate_limiter.h"
/*
* Class: org_rocksdb_GenericRateLimiterConfig
* Method: newRateLimiterHandle
* Signature: (JJI)J
*/
// JNI bridge: constructs a native GenericRateLimiter and returns its raw
// pointer packed into a jlong for the Java side to hold as a handle.
// NOTE(review): the jlong arguments are clamped through jlong_to_size_t
// (declared in rocksjni/portal.h) before being passed on — confirm this
// matches the integer types NewGenericRateLimiter actually expects.
jlong Java_org_rocksdb_GenericRateLimiterConfig_newRateLimiterHandle(
JNIEnv* env, jobject jobj, jlong jrate_bytes_per_second,
jlong jrefill_period_micros, jint jfairness) {
return reinterpret_cast<jlong>(rocksdb::NewGenericRateLimiter(
rocksdb::jlong_to_size_t(jrate_bytes_per_second),
rocksdb::jlong_to_size_t(jrefill_period_micros),
static_cast<int32_t>(jfairness)));
}

View File

@@ -425,27 +425,3 @@ jlong Java_org_rocksdb_RocksDB_iterator0(
rocksdb::Iterator* iterator = db->NewIterator(rocksdb::ReadOptions());
return reinterpret_cast<jlong>(iterator);
}
/*
* Class: org_rocksdb_RocksDB
* Method: getProperty0
* Signature: (JLjava/lang/String;I)Ljava/lang/String;
*/
// JNI bridge for RocksDB.getProperty(): looks up a DB property string
// (e.g. "rocksdb.stats") and returns it as a Java String.
jstring Java_org_rocksdb_RocksDB_getProperty0(
JNIEnv* env, jobject jdb, jlong db_handle, jstring jproperty,
jint jproperty_len) {
auto db = reinterpret_cast<rocksdb::DB*>(db_handle);
// Borrow the UTF chars from the Java string; released below.
const char* property = env->GetStringUTFChars(jproperty, 0);
rocksdb::Slice property_slice(property, jproperty_len);
std::string property_value;
bool retCode = db->GetProperty(property_slice, &property_value);
env->ReleaseStringUTFChars(jproperty, property);
if (!retCode) {
// Property name not recognized by this DB: raise RocksDBException.
// NOTE(review): execution falls through to NewStringUTF below with the
// Java exception pending — confirm the caller tolerates the extra
// (empty-ish) return value alongside the thrown exception.
rocksdb::RocksDBExceptionJni::ThrowNew(env, rocksdb::Status::NotFound());
}
// NOTE(review): relies on std::string::data() being NUL-terminated
// (guaranteed from C++11 on) — c_str() would state the intent explicitly.
return env->NewStringUTF(property_value.data());
}

View File

@@ -31,22 +31,20 @@ jlong Java_org_rocksdb_PlainTableConfig_newTableFactoryHandle(
/*
* Class: org_rocksdb_BlockBasedTableConfig
* Method: newTableFactoryHandle
* Signature: (ZJIJIIZIZZJI)J
* Signature: (ZJIJIIZI)J
*/
jlong Java_org_rocksdb_BlockBasedTableConfig_newTableFactoryHandle(
JNIEnv* env, jobject jobj, jboolean no_block_cache, jlong block_cache_size,
jint block_cache_num_shardbits, jlong block_size, jint block_size_deviation,
jint num_shardbits, jlong block_size, jint block_size_deviation,
jint block_restart_interval, jboolean whole_key_filtering,
jint bits_per_key, jboolean cache_index_and_filter_blocks,
jboolean hash_index_allow_collision, jlong block_cache_compressed_size,
jint block_cache_compressd_num_shard_bits) {
jint bits_per_key) {
rocksdb::BlockBasedTableOptions options;
options.no_block_cache = no_block_cache;
if (!no_block_cache && block_cache_size > 0) {
if (block_cache_num_shardbits > 0) {
if (num_shardbits > 0) {
options.block_cache =
rocksdb::NewLRUCache(block_cache_size, block_cache_num_shardbits);
rocksdb::NewLRUCache(block_cache_size, num_shardbits);
} else {
options.block_cache = rocksdb::NewLRUCache(block_cache_size);
}
@@ -58,17 +56,5 @@ jlong Java_org_rocksdb_BlockBasedTableConfig_newTableFactoryHandle(
if (bits_per_key > 0) {
options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(bits_per_key));
}
options.cache_index_and_filter_blocks = cache_index_and_filter_blocks;
options.hash_index_allow_collision = hash_index_allow_collision;
if (block_cache_compressed_size > 0) {
if (block_cache_compressd_num_shard_bits > 0) {
options.block_cache =
rocksdb::NewLRUCache(block_cache_compressed_size,
block_cache_compressd_num_shard_bits);
} else {
options.block_cache = rocksdb::NewLRUCache(block_cache_compressed_size);
}
}
return reinterpret_cast<jlong>(rocksdb::NewBlockBasedTableFactory(options));
}

View File

@@ -12,14 +12,12 @@
#include "include/org_rocksdb_WriteBatchTest.h"
#include "rocksjni/portal.h"
#include "rocksdb/db.h"
#include "rocksdb/immutable_options.h"
#include "db/memtable.h"
#include "rocksdb/write_batch.h"
#include "db/write_batch_internal.h"
#include "rocksdb/env.h"
#include "rocksdb/memtablerep.h"
#include "util/logging.h"
#include "util/scoped_arena_iterator.h"
#include "util/testharness.h"
/*
@@ -30,7 +28,7 @@
void Java_org_rocksdb_WriteBatch_newWriteBatch(
JNIEnv* env, jobject jobj, jint jreserved_bytes) {
rocksdb::WriteBatch* wb = new rocksdb::WriteBatch(
rocksdb::jlong_to_size_t(jreserved_bytes));
static_cast<size_t>(jreserved_bytes));
rocksdb::WriteBatchJni::setHandle(env, jobj, wb);
}
@@ -204,19 +202,14 @@ jbyteArray Java_org_rocksdb_WriteBatchTest_getContents(
auto factory = std::make_shared<rocksdb::SkipListFactory>();
rocksdb::Options options;
options.memtable_factory = factory;
rocksdb::MemTable* mem = new rocksdb::MemTable(
cmp, rocksdb::ImmutableCFOptions(options),
rocksdb::MemTableOptions(rocksdb::MutableCFOptions(options,
rocksdb::ImmutableCFOptions(options)), options));
rocksdb::MemTable* mem = new rocksdb::MemTable(cmp, options);
mem->Ref();
std::string state;
rocksdb::ColumnFamilyMemTablesDefault cf_mems_default(mem, &options);
rocksdb::Status s =
rocksdb::WriteBatchInternal::InsertInto(b, &cf_mems_default);
int count = 0;
rocksdb::Arena arena;
rocksdb::ScopedArenaIterator iter(mem->NewIterator(
rocksdb::ReadOptions(), &arena));
rocksdb::Iterator* iter = mem->NewIterator(rocksdb::ReadOptions());
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
rocksdb::ParsedInternalKey ikey;
memset(reinterpret_cast<void*>(&ikey), 0, sizeof(ikey));
@@ -251,6 +244,7 @@ jbyteArray Java_org_rocksdb_WriteBatchTest_getContents(
state.append("@");
state.append(rocksdb::NumberToString(ikey.sequence));
}
delete iter;
if (!s.ok()) {
state.append(s.ToString());
} else if (count != rocksdb::WriteBatchInternal::Count(b)) {