Squashed 'src/rocksdb/' content from commit 224932d

git-subtree-dir: src/rocksdb
git-subtree-split: 224932d4d0b561712107d747c662df181c39644d
This commit is contained in:
Vinnie Falco
2014-08-08 11:57:41 -07:00
commit f86d9fd626
435 changed files with 123706 additions and 0 deletions


@@ -0,0 +1,80 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* A subclass of RocksDB which supports backup-related operations.
*
* @see BackupableDBOptions
*/
public class BackupableDB extends RocksDB {
/**
* Open a BackupableDB under the specified path.
* Note that the backup path should be set properly in the
* input BackupableDBOptions.
*
* @param opt options for db.
* @param bopt backup related options.
 * @param db_path the db path for storing data. The path for storing
* backup should be specified in the BackupableDBOptions.
* @return reference to the opened BackupableDB.
*/
public static BackupableDB open(
Options opt, BackupableDBOptions bopt, String db_path)
throws RocksDBException {
RocksDB db = RocksDB.open(opt, db_path);
BackupableDB bdb = new BackupableDB();
bdb.open(db.nativeHandle_, bopt.nativeHandle_);
// Prevent the RocksDB object from attempting to delete
 // the underlying C++ DB object.
db.disOwnNativeHandle();
return bdb;
}
/**
* Captures the state of the database in the latest backup.
* Note that this function is not thread-safe.
*
* @param flushBeforeBackup if true, then all data will be flushed
* before creating backup.
*/
public void createNewBackup(boolean flushBeforeBackup) {
createNewBackup(nativeHandle_, flushBeforeBackup);
}
/**
* Close the BackupableDB instance and release resource.
*
* Internally, BackupableDB owns the rocksdb::DB pointer to its
* associated RocksDB. The release of that RocksDB pointer is
* handled in the destructor of the c++ rocksdb::BackupableDB and
* should be transparent to Java developers.
*/
@Override public synchronized void close() {
if (isInitialized()) {
super.close();
}
}
/**
 * A protected constructor that will be used in the static factory
* method BackupableDB.open().
*/
protected BackupableDB() {
super();
}
@Override protected void finalize() {
close();
}
protected native void open(long rocksDBHandle, long backupDBOptionsHandle);
protected native void createNewBackup(long handle, boolean flag);
}
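
A minimal usage sketch of the class above (the paths and key/value bytes are hypothetical, and it assumes the native rocksdbjni library is available to loadLibrary()):

import org.rocksdb.*;

public class BackupExample {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    Options opt = new Options().setCreateIfMissing(true);
    // The backup directory must differ from the db path (hypothetical locations).
    BackupableDBOptions bopt = new BackupableDBOptions(
        "/tmp/rocksdb_backup", true, true, false, true, 0, 0);
    BackupableDB bdb = BackupableDB.open(opt, bopt, "/tmp/rocksdb_data");
    bdb.put("hello".getBytes(), "world".getBytes());
    bdb.createNewBackup(true);  // flush memtables before capturing the backup
    bdb.close();
    bopt.dispose();
    opt.dispose();
  }
}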


@@ -0,0 +1,73 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* BackupableDBOptions to control the behavior of a backupable database.
* It will be used during the creation of a BackupableDB.
*
* Note that dispose() must be called before an Options instance
 * becomes out-of-scope to release the allocated memory in c++.
*
 * @param path Where to keep the backup files. Has to be different from dbname.
 *     Best to set this to dbname_ + "/backups".
* @param shareTableFiles If share_table_files == true, backup will assume that
* table files with same name have the same contents. This enables
* incremental backups and avoids unnecessary data copies. If
* share_table_files == false, each backup will be on its own and will not
* share any data with other backups. default: true
* @param sync If sync == true, we can guarantee you'll get consistent backup
* even on a machine crash/reboot. Backup process is slower with sync
* enabled. If sync == false, we don't guarantee anything on machine reboot.
* However, chances are some of the backups are consistent. Default: true
* @param destroyOldData If true, it will delete whatever backups there are
* already. Default: false
 * @param backupLogFiles If false, we won't back up log files. This option can be
 *     useful for backing up in-memory databases where log files are persisted,
* but table files are in memory. Default: true
* @param backupRateLimit Max bytes that can be transferred in a second during
* backup. If 0 or negative, then go as fast as you can. Default: 0
* @param restoreRateLimit Max bytes that can be transferred in a second during
* restore. If 0 or negative, then go as fast as you can. Default: 0
*/
public class BackupableDBOptions extends RocksObject {
public BackupableDBOptions(String path, boolean shareTableFiles, boolean sync,
boolean destroyOldData, boolean backupLogFiles, long backupRateLimit,
long restoreRateLimit) {
super();
backupRateLimit = (backupRateLimit <= 0) ? 0 : backupRateLimit;
restoreRateLimit = (restoreRateLimit <= 0) ? 0 : restoreRateLimit;
newBackupableDBOptions(path, shareTableFiles, sync, destroyOldData,
backupLogFiles, backupRateLimit, restoreRateLimit);
}
/**
* Returns the path to the BackupableDB directory.
*
* @return the path to the BackupableDB directory.
*/
public String backupDir() {
assert(isInitialized());
return backupDir(nativeHandle_);
}
/**
* Release the memory allocated for the current instance
* in the c++ side.
*/
@Override protected void disposeInternal() {
assert(isInitialized());
disposeInternal(nativeHandle_);
}
private native void newBackupableDBOptions(String path,
boolean shareTableFiles, boolean sync, boolean destroyOldData,
boolean backupLogFiles, long backupRateLimit, long restoreRateLimit);
private native String backupDir(long handle);
private native void disposeInternal(long handle);
}


@@ -0,0 +1,37 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* This class creates a new filter policy that uses a bloom filter
* with approximately the specified number of bits per key.
* A good value for bitsPerKey is 10, which yields a filter
* with ~ 1% false positive rate.
*
* Default value of bits per key is 10.
*/
public class BloomFilter extends Filter {
private static final int DEFAULT_BITS_PER_KEY = 10;
private final int bitsPerKey_;
public BloomFilter() {
this(DEFAULT_BITS_PER_KEY);
}
public BloomFilter(int bitsPerKey) {
super();
bitsPerKey_ = bitsPerKey;
createNewFilter();
}
@Override
protected void createNewFilter() {
createNewFilter0(bitsPerKey_);
}
private native void createNewFilter0(int bitsPerKey);
}
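
The "~1% false positive rate" figure above follows from the standard Bloom-filter estimate fp ≈ (1 - e^(-k/b))^k with k ≈ b * ln 2 hash functions for b bits per key; a small self-contained sketch of that arithmetic:

public class BloomFpEstimate {
  public static void main(String[] args) {
    int bitsPerKey = 10;                      // the default used by BloomFilter above
    double k = bitsPerKey * Math.log(2);      // near-optimal number of hash functions
    double fp = Math.pow(1 - Math.exp(-k / bitsPerKey), k);
    // Prints roughly 0.8%, consistent with the ~1% figure quoted above.
    System.out.printf("bits/key=%d -> ~%.2f%% false positives%n", bitsPerKey, fp * 100);
  }
}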


@@ -0,0 +1,22 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
public enum CompactionStyle {
LEVEL((byte) 0),
UNIVERSAL((byte) 1),
FIFO((byte) 2);
private final byte value_;
private CompactionStyle(byte value) {
value_ = value;
}
public byte getValue() {
return value_;
}
}


@@ -0,0 +1,25 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
public enum CompressionType {
NO_COMPRESSION((byte) 0),
SNAPPY_COMPRESSION((byte) 1),
ZLIB_COMPRESSION((byte) 2),
BZLIB2_COMPRESSION((byte) 3),
LZ4_COMPRESSION((byte) 4),
LZ4HC_COMPRESSION((byte) 5);
private final byte value_;
private CompressionType(byte value) {
value_ = value;
}
public byte getValue() {
return value_;
}
}


@@ -0,0 +1,31 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* Filters are stored in rocksdb and are consulted automatically
* by rocksdb to decide whether or not to read some
* information from disk. In many cases, a filter can cut down the
 * number of disk seeks from a handful to a single disk seek per
* DB::Get() call.
*/
public abstract class Filter extends RocksObject {
protected abstract void createNewFilter();
/**
* Deletes underlying C++ filter pointer.
*
* Note that this function should be called only after all
* RocksDB instances referencing the filter are closed.
* Otherwise an undefined behavior will occur.
*/
@Override protected void disposeInternal() {
assert(isInitialized());
disposeInternal(nativeHandle_);
}
private native void disposeInternal(long handle);
}


@@ -0,0 +1,52 @@
package org.rocksdb;
/**
* The config for hash linked list memtable representation
 * Such memtable contains a fixed-size array of buckets, where
* each bucket points to a sorted singly-linked
* list (or null if the bucket is empty).
*
* Note that since this mem-table representation relies on the
* key prefix, it is required to invoke one of the usePrefixExtractor
* functions to specify how to extract key prefix given a key.
* If proper prefix-extractor is not set, then RocksDB will
* use the default memtable representation (SkipList) instead
* and post a warning in the LOG.
*/
public class HashLinkedListMemTableConfig extends MemTableConfig {
public static final long DEFAULT_BUCKET_COUNT = 50000;
public HashLinkedListMemTableConfig() {
bucketCount_ = DEFAULT_BUCKET_COUNT;
}
/**
* Set the number of buckets in the fixed-size array used
* in the hash linked-list mem-table.
*
* @param count the number of hash buckets.
* @return the reference to the current HashLinkedListMemTableConfig.
*/
public HashLinkedListMemTableConfig setBucketCount(long count) {
bucketCount_ = count;
return this;
}
/**
* Returns the number of buckets that will be used in the memtable
* created based on this config.
*
* @return the number of buckets
*/
public long bucketCount() {
return bucketCount_;
}
@Override protected long newMemTableFactoryHandle() {
return newMemTableFactoryHandle(bucketCount_);
}
private native long newMemTableFactoryHandle(long bucketCount);
private long bucketCount_;
}
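
A sketch of plugging this memtable into Options. Hedged: useFixedLengthPrefixExtractor stands in for whichever usePrefixExtractor variant the Options class actually exposes (Options is not shown in this diff), and the prefix length and path are hypothetical:

import org.rocksdb.*;

public class HashLinkedListExample {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    Options options = new Options()
        .setCreateIfMissing(true)
        .useFixedLengthPrefixExtractor(8)   // assumed prefix-extractor setter, see note above
        .setMemTableConfig(
            new HashLinkedListMemTableConfig().setBucketCount(100000));
    RocksDB db = RocksDB.open(options, "/tmp/hash_list_db");
    db.close();
    options.dispose();
  }
}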


@@ -0,0 +1,97 @@
package org.rocksdb;
/**
* The config for hash skip-list mem-table representation.
 * Such mem-table representation contains a fixed-size array of
* buckets, where each bucket points to a skiplist (or null if the
* bucket is empty).
*
* Note that since this mem-table representation relies on the
* key prefix, it is required to invoke one of the usePrefixExtractor
* functions to specify how to extract key prefix given a key.
* If proper prefix-extractor is not set, then RocksDB will
* use the default memtable representation (SkipList) instead
* and post a warning in the LOG.
*/
public class HashSkipListMemTableConfig extends MemTableConfig {
public static final int DEFAULT_BUCKET_COUNT = 1000000;
public static final int DEFAULT_BRANCHING_FACTOR = 4;
public static final int DEFAULT_HEIGHT = 4;
public HashSkipListMemTableConfig() {
bucketCount_ = DEFAULT_BUCKET_COUNT;
branchingFactor_ = DEFAULT_BRANCHING_FACTOR;
height_ = DEFAULT_HEIGHT;
}
/**
* Set the number of hash buckets used in the hash skiplist memtable.
* Default = 1000000.
*
* @param count the number of hash buckets used in the hash
* skiplist memtable.
* @return the reference to the current HashSkipListMemTableConfig.
*/
public HashSkipListMemTableConfig setBucketCount(long count) {
bucketCount_ = count;
return this;
}
/**
* @return the number of hash buckets
*/
public long bucketCount() {
return bucketCount_;
}
/**
* Set the height of the skip list. Default = 4.
 *
 * @param height the height of the skip list.
 * @return the reference to the current HashSkipListMemTableConfig.
*/
public HashSkipListMemTableConfig setHeight(int height) {
height_ = height;
return this;
}
/**
* @return the height of the skip list.
*/
public int height() {
return height_;
}
/**
* Set the branching factor used in the hash skip-list memtable.
* This factor controls the probabilistic size ratio between adjacent
* links in the skip list.
*
* @param bf the probabilistic size ratio between adjacent link
* lists in the skip list.
* @return the reference to the current HashSkipListMemTableConfig.
*/
public HashSkipListMemTableConfig setBranchingFactor(int bf) {
branchingFactor_ = bf;
return this;
}
/**
* @return branching factor, the probabilistic size ratio between
* adjacent links in the skip list.
*/
public int branchingFactor() {
return branchingFactor_;
}
@Override protected long newMemTableFactoryHandle() {
return newMemTableFactoryHandle(
bucketCount_, height_, branchingFactor_);
}
private native long newMemTableFactoryHandle(
long bucketCount, int height, int branchingFactor);
private long bucketCount_;
private int branchingFactor_;
private int height_;
}
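
The skip-list variant is configured the same way; a short sketch tuning the three knobs documented above (again treating useFixedLengthPrefixExtractor as an assumed Options method, with a hypothetical path):

import org.rocksdb.*;

public class HashSkipListExample {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    HashSkipListMemTableConfig cfg = new HashSkipListMemTableConfig()
        .setBucketCount(2000000)
        .setHeight(4)
        .setBranchingFactor(4);
    Options options = new Options()
        .setCreateIfMissing(true)
        .useFixedLengthPrefixExtractor(8)   // assumed, as in the previous sketch
        .setMemTableConfig(cfg);
    RocksDB db = RocksDB.open(options, "/tmp/hash_skiplist_db");
    db.close();
    options.dispose();
  }
}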


@@ -0,0 +1,43 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
public class HistogramData {
private final double median_;
private final double percentile95_;
private final double percentile99_;
private final double average_;
private final double standardDeviation_;
public HistogramData(double median, double percentile95,
double percentile99, double average, double standardDeviation) {
median_ = median;
percentile95_ = percentile95;
percentile99_ = percentile99;
average_ = average;
standardDeviation_ = standardDeviation;
}
public double getMedian() {
return median_;
}
public double getPercentile95() {
return percentile95_;
}
public double getPercentile99() {
return percentile99_;
}
public double getAverage() {
return average_;
}
public double getStandardDeviation() {
return standardDeviation_;
}
}


@@ -0,0 +1,39 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
public enum HistogramType {
DB_GET(0),
DB_WRITE(1),
COMPACTION_TIME(2),
TABLE_SYNC_MICROS(3),
COMPACTION_OUTFILE_SYNC_MICROS(4),
WAL_FILE_SYNC_MICROS(5),
MANIFEST_FILE_SYNC_MICROS(6),
// TIME SPENT IN IO DURING TABLE OPEN
TABLE_OPEN_IO_MICROS(7),
DB_MULTIGET(8),
READ_BLOCK_COMPACTION_MICROS(9),
READ_BLOCK_GET_MICROS(10),
WRITE_RAW_BLOCK_MICROS(11),
STALL_L0_SLOWDOWN_COUNT(12),
STALL_MEMTABLE_COMPACTION_COUNT(13),
STALL_L0_NUM_FILES_COUNT(14),
HARD_RATE_LIMIT_DELAY_COUNT(15),
SOFT_RATE_LIMIT_DELAY_COUNT(16),
NUM_FILES_IN_SINGLE_COMPACTION(17);
private final int value_;
private HistogramType(int value) {
value_ = value;
}
public int getValue() {
return value_;
}
}


@@ -0,0 +1,27 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
 * MemTableConfig is used to configure the internal mem-table of a RocksDB.
 * It is required for each memtable to have one such sub-class to allow
 * Java developers to use it.
 *
 * To make a RocksDB use a specific MemTable format, its associated
 * MemTableConfig should be properly set and passed into Options
 * via Options.setMemTableFactory(), and the db opened using that Options.
*
* @see Options
*/
public abstract class MemTableConfig {
/**
* This function should only be called by Options.setMemTableConfig(),
* which will create a c++ shared-pointer to the c++ MemTableRepFactory
 * that is associated with the Java MemTableConfig.
*
* @see Options.setMemTableFactory()
*/
abstract protected long newMemTableFactoryHandle();
}

File diff suppressed because it is too large


@@ -0,0 +1,123 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* The config for plain table sst format.
*
* PlainTable is a RocksDB's SST file format optimized for low query latency
 * on pure-memory or really low-latency media. It also supports the
 * prefix hash feature.
*/
public class PlainTableConfig extends TableFormatConfig {
public static final int VARIABLE_LENGTH = 0;
public static final int DEFAULT_BLOOM_BITS_PER_KEY = 10;
public static final double DEFAULT_HASH_TABLE_RATIO = 0.75;
public static final int DEFAULT_INDEX_SPARSENESS = 16;
public PlainTableConfig() {
keySize_ = VARIABLE_LENGTH;
bloomBitsPerKey_ = DEFAULT_BLOOM_BITS_PER_KEY;
hashTableRatio_ = DEFAULT_HASH_TABLE_RATIO;
indexSparseness_ = DEFAULT_INDEX_SPARSENESS;
}
/**
* Set the length of the user key. If it is set to be VARIABLE_LENGTH,
 * then it indicates the user keys are variable-length. Otherwise,
 * all the keys need to have the same length in bytes.
* DEFAULT: VARIABLE_LENGTH
*
* @param keySize the length of the user key.
* @return the reference to the current config.
*/
public PlainTableConfig setKeySize(int keySize) {
keySize_ = keySize;
return this;
}
/**
* @return the specified size of the user key. If VARIABLE_LENGTH,
* then it indicates variable-length key.
*/
public int keySize() {
return keySize_;
}
/**
* Set the number of bits per key used by the internal bloom filter
* in the plain table sst format.
*
 * @param bitsPerKey the number of bits per key for the bloom filter.
* @return the reference to the current config.
*/
public PlainTableConfig setBloomBitsPerKey(int bitsPerKey) {
bloomBitsPerKey_ = bitsPerKey;
return this;
}
/**
* @return the number of bits per key used for the bloom filter.
*/
public int bloomBitsPerKey() {
return bloomBitsPerKey_;
}
/**
* hashTableRatio is the desired utilization of the hash table used
* for prefix hashing. The ideal ratio would be the number of
* prefixes / the number of hash buckets. If this value is set to
* zero, then hash table will not be used.
*
* @param ratio the hash table ratio.
* @return the reference to the current config.
*/
public PlainTableConfig setHashTableRatio(double ratio) {
hashTableRatio_ = ratio;
return this;
}
/**
* @return the hash table ratio.
*/
public double hashTableRatio() {
return hashTableRatio_;
}
/**
* Index sparseness determines the index interval for keys inside the
* same prefix. This number is equal to the maximum number of linear
* search required after hash and binary search. If it's set to 0,
* then each key will be indexed.
*
* @param sparseness the index sparseness.
* @return the reference to the current config.
*/
public PlainTableConfig setIndexSparseness(int sparseness) {
indexSparseness_ = sparseness;
return this;
}
/**
* @return the index sparseness.
*/
public int indexSparseness() {
return indexSparseness_;
}
@Override protected long newTableFactoryHandle() {
return newTableFactoryHandle(keySize_, bloomBitsPerKey_,
hashTableRatio_, indexSparseness_);
}
private native long newTableFactoryHandle(
int keySize, int bloomBitsPerKey,
double hashTableRatio, int indexSparseness);
private int keySize_;
private int bloomBitsPerKey_;
private double hashTableRatio_;
private int indexSparseness_;
}
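
A sketch wiring the plain-table format into Options. The fixed 16-byte key size, prefix length, and path are hypothetical, and the prefix-hash feature is assumed to need a prefix extractor (here an assumed useFixedLengthPrefixExtractor on Options):

import org.rocksdb.*;

public class PlainTableExample {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    PlainTableConfig pt = new PlainTableConfig()
        .setKeySize(16)             // fixed 16-byte user keys (hypothetical)
        .setBloomBitsPerKey(10)
        .setHashTableRatio(0.75)
        .setIndexSparseness(16);
    Options options = new Options()
        .setCreateIfMissing(true)
        .useFixedLengthPrefixExtractor(8)   // assumed prefix-extractor setter
        .setTableFormatConfig(pt);
    RocksDB db = RocksDB.open(options, "/tmp/plain_table_db");
    db.close();
    options.dispose();
  }
}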


@@ -0,0 +1,125 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* The class that controls the get behavior.
*
* Note that dispose() must be called before an Options instance
 * becomes out-of-scope to release the allocated memory in c++.
*/
public class ReadOptions extends RocksObject {
public ReadOptions() {
super();
newReadOptions();
}
private native void newReadOptions();
/**
* If true, all data read from underlying storage will be
* verified against corresponding checksums.
* Default: true
*
* @return true if checksum verification is on.
*/
public boolean verifyChecksums() {
assert(isInitialized());
return verifyChecksums(nativeHandle_);
}
private native boolean verifyChecksums(long handle);
/**
* If true, all data read from underlying storage will be
* verified against corresponding checksums.
* Default: true
*
* @param verifyChecksums if true, then checksum verification
* will be performed on every read.
* @return the reference to the current ReadOptions.
*/
public ReadOptions setVerifyChecksums(boolean verifyChecksums) {
assert(isInitialized());
setVerifyChecksums(nativeHandle_, verifyChecksums);
return this;
}
private native void setVerifyChecksums(
long handle, boolean verifyChecksums);
// TODO(yhchiang): this option seems to be block-based table only.
// move this to a better place?
/**
 * Fill the cache when loading the block-based sst formatted db.
* Callers may wish to set this field to false for bulk scans.
* Default: true
*
* @return true if the fill-cache behavior is on.
*/
public boolean fillCache() {
assert(isInitialized());
return fillCache(nativeHandle_);
}
private native boolean fillCache(long handle);
/**
 * Fill the cache when loading the block-based sst formatted db.
* Callers may wish to set this field to false for bulk scans.
* Default: true
*
* @param fillCache if true, then fill-cache behavior will be
* performed.
* @return the reference to the current ReadOptions.
*/
public ReadOptions setFillCache(boolean fillCache) {
assert(isInitialized());
setFillCache(nativeHandle_, fillCache);
return this;
}
private native void setFillCache(
long handle, boolean fillCache);
/**
* Specify to create a tailing iterator -- a special iterator that has a
* view of the complete database (i.e. it can also be used to read newly
* added data) and is optimized for sequential reads. It will return records
* that were inserted into the database after the creation of the iterator.
* Default: false
* Not supported in ROCKSDB_LITE mode!
*
* @return true if tailing iterator is enabled.
*/
public boolean tailing() {
assert(isInitialized());
return tailing(nativeHandle_);
}
private native boolean tailing(long handle);
/**
* Specify to create a tailing iterator -- a special iterator that has a
* view of the complete database (i.e. it can also be used to read newly
* added data) and is optimized for sequential reads. It will return records
* that were inserted into the database after the creation of the iterator.
* Default: false
* Not supported in ROCKSDB_LITE mode!
*
* @param tailing if true, then tailing iterator will be enabled.
* @return the reference to the current ReadOptions.
*/
public ReadOptions setTailing(boolean tailing) {
assert(isInitialized());
setTailing(nativeHandle_, tailing);
return this;
}
private native void setTailing(
long handle, boolean tailing);
@Override protected void disposeInternal() {
assert(isInitialized());
disposeInternal(nativeHandle_);
}
private native void disposeInternal(long handle);
}
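
A sketch of a bulk read with the options above; disabling fillCache keeps a long scan from evicting hot blocks. The path and key are hypothetical:

import org.rocksdb.*;

public class ReadOptionsExample {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    Options options = new Options().setCreateIfMissing(true);
    RocksDB db = RocksDB.open(options, "/tmp/read_options_db");
    ReadOptions ro = new ReadOptions()
        .setVerifyChecksums(true)
        .setFillCache(false);   // bulk scan: do not pollute the block cache
    byte[] value = db.get(ro, "some-key".getBytes());
    System.out.println(value == null ? "not found" : new String(value));
    ro.dispose();
    db.close();
    options.dispose();
  }
}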


@@ -0,0 +1,84 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* This class is used to access information about backups and restore from them.
*
 * Note that dispose() must be called before this instance becomes out-of-scope
* to release the allocated memory in c++.
*
* @param options Instance of BackupableDBOptions.
*/
public class RestoreBackupableDB extends RocksObject {
public RestoreBackupableDB(BackupableDBOptions options) {
super();
nativeHandle_ = newRestoreBackupableDB(options.nativeHandle_);
}
/**
* Restore from backup with backup_id
* IMPORTANT -- if options_.share_table_files == true and you restore DB
* from some backup that is not the latest, and you start creating new
* backups from the new DB, they will probably fail.
*
* Example: Let's say you have backups 1, 2, 3, 4, 5 and you restore 3.
* If you add new data to the DB and try creating a new backup now, the
* database will diverge from backups 4 and 5 and the new backup will fail.
 * If you want to create a new backup, you will first have to delete backups 4
* and 5.
*/
public void restoreDBFromBackup(long backupId, String dbDir, String walDir,
RestoreOptions restoreOptions) throws RocksDBException {
restoreDBFromBackup0(nativeHandle_, backupId, dbDir, walDir,
restoreOptions.nativeHandle_);
}
/**
* Restore from the latest backup.
*/
public void restoreDBFromLatestBackup(String dbDir, String walDir,
RestoreOptions restoreOptions) throws RocksDBException {
restoreDBFromLatestBackup0(nativeHandle_, dbDir, walDir,
restoreOptions.nativeHandle_);
}
/**
* Deletes old backups, keeping latest numBackupsToKeep alive.
*
 * @param numBackupsToKeep the number of latest backups to keep.
*/
public void purgeOldBackups(int numBackupsToKeep) throws RocksDBException {
purgeOldBackups0(nativeHandle_, numBackupsToKeep);
}
/**
* Deletes a specific backup.
*
 * @param backupId the ID of the backup to delete.
*/
public void deleteBackup(long backupId) throws RocksDBException {
deleteBackup0(nativeHandle_, backupId);
}
/**
* Release the memory allocated for the current instance
* in the c++ side.
*/
@Override public synchronized void disposeInternal() {
assert(isInitialized());
dispose(nativeHandle_);
}
private native long newRestoreBackupableDB(long options);
private native void restoreDBFromBackup0(long nativeHandle, long backupId,
String dbDir, String walDir, long restoreOptions) throws RocksDBException;
private native void restoreDBFromLatestBackup0(long nativeHandle,
String dbDir, String walDir, long restoreOptions) throws RocksDBException;
private native void purgeOldBackups0(long nativeHandle, int numBackupsToKeep);
private native void deleteBackup0(long nativeHandle, long backupId);
private native void dispose(long nativeHandle);
}
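
A sketch of restoring the most recent backup into a (hypothetical) db directory and then trimming old backups:

import org.rocksdb.*;

public class RestoreExample {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    BackupableDBOptions bopt = new BackupableDBOptions(
        "/tmp/rocksdb_backup", true, true, false, true, 0, 0);
    RestoreBackupableDB restore = new RestoreBackupableDB(bopt);
    RestoreOptions ropt = new RestoreOptions(false);  // keepLogFiles = false
    restore.restoreDBFromLatestBackup(
        "/tmp/rocksdb_data", "/tmp/rocksdb_data", ropt);
    restore.purgeOldBackups(3);   // keep only the three newest backups
    ropt.dispose();
    restore.dispose();
    bopt.dispose();
  }
}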


@@ -0,0 +1,37 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* RestoreOptions to control the behavior of restore.
*
 * Note that dispose() must be called before this instance becomes out-of-scope
* to release the allocated memory in c++.
*
 * @param keepLogFiles If true, restore won't overwrite the existing log files in wal_dir. It
* will also move all log files from archive directory to wal_dir. Use this
* option in combination with BackupableDBOptions::backup_log_files = false
* for persisting in-memory databases.
* Default: false
*/
public class RestoreOptions extends RocksObject {
public RestoreOptions(boolean keepLogFiles) {
super();
nativeHandle_ = newRestoreOptions(keepLogFiles);
}
/**
* Release the memory allocated for the current instance
* in the c++ side.
*/
@Override public synchronized void disposeInternal() {
assert(isInitialized());
dispose(nativeHandle_);
}
private native long newRestoreOptions(boolean keepLogFiles);
private native void dispose(long handle);
}


@@ -0,0 +1,372 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
import java.util.List;
import java.util.Map;
import java.util.HashMap;
import java.io.Closeable;
import java.io.IOException;
import org.rocksdb.util.Environment;
/**
* A RocksDB is a persistent ordered map from keys to values. It is safe for
* concurrent access from multiple threads without any external synchronization.
* All methods of this class could potentially throw RocksDBException, which
 * indicates something went wrong on the rocksdb library side and that the call failed.
*/
public class RocksDB extends RocksObject {
public static final int NOT_FOUND = -1;
private static final String[] compressionLibs_ = {
"snappy", "z", "bzip2", "lz4", "lz4hc"};
/**
* Loads the necessary library files.
* Calling this method twice will have no effect.
*/
public static synchronized void loadLibrary() {
// loading possibly necessary libraries.
for (String lib : compressionLibs_) {
try {
System.loadLibrary(lib);
} catch (UnsatisfiedLinkError e) {
// since it may be optional, we ignore its loading failure here.
}
}
 // However, if any of them is required, we will see an error here.
System.loadLibrary("rocksdbjni");
}
/**
* Tries to load the necessary library files from the given list of
* directories.
*
* @param paths a list of strings where each describes a directory
* of a library.
*/
public static synchronized void loadLibrary(List<String> paths) {
for (String lib : compressionLibs_) {
for (String path : paths) {
try {
System.load(path + "/" + Environment.getSharedLibraryName(lib));
break;
} catch (UnsatisfiedLinkError e) {
 // since they are optional, we ignore loading failures.
}
}
}
boolean success = false;
UnsatisfiedLinkError err = null;
for (String path : paths) {
try {
System.load(path + "/" + Environment.getJniLibraryName("rocksdbjni"));
success = true;
break;
} catch (UnsatisfiedLinkError e) {
err = e;
}
}
if (success == false) {
throw err;
}
}
/**
* The factory constructor of RocksDB that opens a RocksDB instance given
* the path to the database using the default options w/ createIfMissing
* set to true.
*
* @param path the path to the rocksdb.
 * @return a rocksdb instance on success, null if the specified rocksdb
 * cannot be opened.
*
* @see Options.setCreateIfMissing()
* @see Options.createIfMissing()
*/
public static RocksDB open(String path) throws RocksDBException {
 // This allows using the rocksjni default Options instead of
 // the c++ one.
Options options = new Options();
return open(options, path);
}
/**
* The factory constructor of RocksDB that opens a RocksDB instance given
* the path to the database using the specified options and db path.
*
 * The Options instance *should* not be disposed before all DBs using this
 * options instance have been closed. If the user doesn't call dispose()
 * explicitly, then this options instance will be GC'd automatically.
 *
 * An Options instance can be re-used to open multiple DBs if DB statistics
 * are not used. If DB statistics are required, then it's recommended to open
 * the DB with a new Options instance, as the underlying native statistics
 * instance does not use any locks to prevent concurrent updates.
*/
public static RocksDB open(Options options, String path)
throws RocksDBException {
 // When a non-default Options is used, keeping an Options reference
 // in RocksDB prevents Java from GC'ing the Options during the
 // life-time of the currently-created RocksDB.
RocksDB db = new RocksDB();
db.open(options.nativeHandle_, options.cacheSize_,
options.numShardBits_, path);
db.storeOptionsInstance(options);
return db;
}
private void storeOptionsInstance(Options options) {
options_ = options;
}
@Override protected void disposeInternal() {
assert(isInitialized());
disposeInternal(nativeHandle_);
}
/**
* Close the RocksDB instance.
* This function is equivalent to dispose().
*/
public void close() {
dispose();
}
/**
* Set the database entry for "key" to "value".
*
* @param key the specified key to be inserted.
* @param value the value associated with the specified key.
*/
public void put(byte[] key, byte[] value) throws RocksDBException {
put(nativeHandle_, key, key.length, value, value.length);
}
/**
* Set the database entry for "key" to "value".
*
* @param key the specified key to be inserted.
* @param value the value associated with the specified key.
*/
public void put(WriteOptions writeOpts, byte[] key, byte[] value)
throws RocksDBException {
put(nativeHandle_, writeOpts.nativeHandle_,
key, key.length, value, value.length);
}
/**
* Apply the specified updates to the database.
*/
public void write(WriteOptions writeOpts, WriteBatch updates)
throws RocksDBException {
write(writeOpts.nativeHandle_, updates.nativeHandle_);
}
/**
* Get the value associated with the specified key.
*
* @param key the key to retrieve the value.
* @param value the out-value to receive the retrieved value.
* @return The size of the actual value that matches the specified
* {@code key} in byte. If the return value is greater than the
* length of {@code value}, then it indicates that the size of the
 * input buffer {@code value} is insufficient and a partial result will
 * be returned. RocksDB.NOT_FOUND will be returned if the value is
 * not found.
*/
public int get(byte[] key, byte[] value) throws RocksDBException {
return get(nativeHandle_, key, key.length, value, value.length);
}
/**
* Get the value associated with the specified key.
*
* @param key the key to retrieve the value.
* @param value the out-value to receive the retrieved value.
* @return The size of the actual value that matches the specified
* {@code key} in byte. If the return value is greater than the
* length of {@code value}, then it indicates that the size of the
 * input buffer {@code value} is insufficient and a partial result will
 * be returned. RocksDB.NOT_FOUND will be returned if the value is
 * not found.
*/
public int get(ReadOptions opt, byte[] key, byte[] value)
throws RocksDBException {
return get(nativeHandle_, opt.nativeHandle_,
key, key.length, value, value.length);
}
/**
* The simplified version of get which returns a new byte array storing
* the value associated with the specified input key if any. null will be
* returned if the specified key is not found.
*
 * @param key the key used to retrieve the value.
* @return a byte array storing the value associated with the input key if
* any. null if it does not find the specified key.
*
* @see RocksDBException
*/
public byte[] get(byte[] key) throws RocksDBException {
return get(nativeHandle_, key, key.length);
}
/**
* The simplified version of get which returns a new byte array storing
* the value associated with the specified input key if any. null will be
* returned if the specified key is not found.
*
 * @param key the key used to retrieve the value.
* @param opt Read options.
* @return a byte array storing the value associated with the input key if
* any. null if it does not find the specified key.
*
* @see RocksDBException
*/
public byte[] get(ReadOptions opt, byte[] key) throws RocksDBException {
return get(nativeHandle_, opt.nativeHandle_, key, key.length);
}
/**
* Returns a map of keys for which values were found in DB.
*
* @param keys List of keys for which values need to be retrieved.
* @return Map where key of map is the key passed by user and value for map
* entry is the corresponding value in DB.
*
* @see RocksDBException
*/
public Map<byte[], byte[]> multiGet(List<byte[]> keys)
throws RocksDBException {
assert(keys.size() != 0);
List<byte[]> values = multiGet(
nativeHandle_, keys, keys.size());
Map<byte[], byte[]> keyValueMap = new HashMap<byte[], byte[]>();
for(int i = 0; i < values.size(); i++) {
if(values.get(i) == null) {
continue;
}
keyValueMap.put(keys.get(i), values.get(i));
}
return keyValueMap;
}
/**
* Returns a map of keys for which values were found in DB.
*
 * @param keys List of keys for which values need to be retrieved.
* @param opt Read options.
* @return Map where key of map is the key passed by user and value for map
* entry is the corresponding value in DB.
*
* @see RocksDBException
*/
public Map<byte[], byte[]> multiGet(ReadOptions opt, List<byte[]> keys)
throws RocksDBException {
assert(keys.size() != 0);
List<byte[]> values = multiGet(
nativeHandle_, opt.nativeHandle_, keys, keys.size());
Map<byte[], byte[]> keyValueMap = new HashMap<byte[], byte[]>();
for(int i = 0; i < values.size(); i++) {
if(values.get(i) == null) {
continue;
}
keyValueMap.put(keys.get(i), values.get(i));
}
return keyValueMap;
}
/**
 * Remove the database entry (if any) for "key". A RocksDBException is
 * thrown on error. It is not an error if "key"
 * did not exist in the database.
*/
public void remove(byte[] key) throws RocksDBException {
remove(nativeHandle_, key, key.length);
}
/**
 * Remove the database entry (if any) for "key". A RocksDBException is
 * thrown on error. It is not an error if "key"
 * did not exist in the database.
*/
public void remove(WriteOptions writeOpt, byte[] key)
throws RocksDBException {
remove(nativeHandle_, writeOpt.nativeHandle_, key, key.length);
}
/**
* Return a heap-allocated iterator over the contents of the database.
* The result of newIterator() is initially invalid (caller must
* call one of the Seek methods on the iterator before using it).
*
* Caller should close the iterator when it is no longer needed.
* The returned iterator should be closed before this db is closed.
*
* @return instance of iterator object.
*/
public RocksIterator newIterator() {
return new RocksIterator(iterator0(nativeHandle_));
}
/**
* Private constructor.
*/
protected RocksDB() {
super();
}
// native methods
protected native void open(
long optionsHandle, long cacheSize, int numShardBits,
String path) throws RocksDBException;
protected native void put(
long handle, byte[] key, int keyLen,
byte[] value, int valueLen) throws RocksDBException;
protected native void put(
long handle, long writeOptHandle,
byte[] key, int keyLen,
byte[] value, int valueLen) throws RocksDBException;
protected native void write(
long writeOptHandle, long batchHandle) throws RocksDBException;
protected native int get(
long handle, byte[] key, int keyLen,
byte[] value, int valueLen) throws RocksDBException;
protected native int get(
long handle, long readOptHandle, byte[] key, int keyLen,
byte[] value, int valueLen) throws RocksDBException;
protected native List<byte[]> multiGet(
long dbHandle, List<byte[]> keys, int keysCount);
protected native List<byte[]> multiGet(
long dbHandle, long rOptHandle, List<byte[]> keys, int keysCount);
protected native byte[] get(
long handle, byte[] key, int keyLen) throws RocksDBException;
protected native byte[] get(
long handle, long readOptHandle,
byte[] key, int keyLen) throws RocksDBException;
protected native void remove(
long handle, byte[] key, int keyLen) throws RocksDBException;
protected native void remove(
long handle, long writeOptHandle,
byte[] key, int keyLen) throws RocksDBException;
protected native long iterator0(long optHandle);
private native void disposeInternal(long handle);
protected Options options_;
}
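
Putting the core RocksDB API above together, a minimal open/put/get/remove sketch (the path is hypothetical):

import org.rocksdb.*;

public class BasicExample {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    Options options = new Options().setCreateIfMissing(true);
    RocksDB db = RocksDB.open(options, "/tmp/basic_db");
    db.put("key1".getBytes(), "value1".getBytes());
    byte[] value = db.get("key1".getBytes());
    System.out.println(new String(value));   // prints "value1"
    db.remove("key1".getBytes());
    db.close();
    options.dispose();
  }
}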


@@ -0,0 +1,23 @@
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
import java.util.*;
/**
* A RocksDBException encapsulates the error of an operation. This exception
* type is used to describe an internal error from the c++ rocksdb library.
*/
public class RocksDBException extends Exception {
/**
 * Constructs a RocksDBException with the specified error message.
*
* @param msg the specified error message.
*/
public RocksDBException(String msg) {
super(msg);
}
}


@@ -0,0 +1,102 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* A RocksEnv is an interface used by the rocksdb implementation to access
* operating system functionality like the filesystem etc.
*
* All Env implementations are safe for concurrent access from
* multiple threads without any external synchronization.
*/
public class RocksEnv extends RocksObject {
public static final int FLUSH_POOL = 0;
public static final int COMPACTION_POOL = 1;
static {
default_env_ = new RocksEnv(getDefaultEnvInternal());
}
private static native long getDefaultEnvInternal();
/**
* Returns the default environment suitable for the current operating
* system.
*
* The result of getDefault() is a singleton whose ownership belongs
* to rocksdb c++. As a result, the returned RocksEnv will not
* have the ownership of its c++ resource, and calling its dispose()
 * will be a no-op.
*/
public static RocksEnv getDefault() {
return default_env_;
}
/**
* Sets the number of background worker threads of the flush pool
* for this environment.
 * Default number: 1
 *
 * @param num the number of threads in the flush pool.
 * @return the reference to the current RocksEnv.
*/
public RocksEnv setBackgroundThreads(int num) {
return setBackgroundThreads(num, FLUSH_POOL);
}
/**
* Sets the number of background worker threads of the specified thread
* pool for this environment.
*
* @param num the number of threads
 * @param poolID the id of the specified thread pool. Should be either
* FLUSH_POOL or COMPACTION_POOL.
* Default number: 1
*/
public RocksEnv setBackgroundThreads(int num, int poolID) {
setBackgroundThreads(nativeHandle_, num, poolID);
return this;
}
private native void setBackgroundThreads(
long handle, int num, int priority);
/**
* Returns the length of the queue associated with the specified
* thread pool.
*
 * @param poolID the id of the specified thread pool. Should be either
* FLUSH_POOL or COMPACTION_POOL.
*/
public int getThreadPoolQueueLen(int poolID) {
return getThreadPoolQueueLen(nativeHandle_, poolID);
}
private native int getThreadPoolQueueLen(long handle, int poolID);
/**
* Package-private constructor that uses the specified native handle
* to construct a RocksEnv. Note that the ownership of the input handle
* belongs to the caller, and the newly created RocksEnv will not take
* the ownership of the input handle. As a result, calling dispose()
 * of the created RocksEnv will be a no-op.
*/
RocksEnv(long handle) {
super();
nativeHandle_ = handle;
disOwnNativeHandle();
}
/**
* The helper function of dispose() which all subclasses of RocksObject
* must implement to release their associated C++ resource.
*/
protected void disposeInternal() {
disposeInternal(nativeHandle_);
}
private native void disposeInternal(long handle);
/**
* The static default RocksEnv. The ownership of its native handle
* belongs to rocksdb c++ and is not able to be released on the Java
* side.
*/
static RocksEnv default_env_;
}
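
A sketch of tuning the default environment's thread pools; the pool sizes are illustrative:

import org.rocksdb.*;

public class EnvExample {
  public static void main(String[] args) {
    RocksDB.loadLibrary();   // load the native library before touching RocksEnv
    RocksEnv env = RocksEnv.getDefault();
    env.setBackgroundThreads(2, RocksEnv.FLUSH_POOL);
    env.setBackgroundThreads(4, RocksEnv.COMPACTION_POOL);
    System.out.println("compaction queue length: "
        + env.getThreadPoolQueueLen(RocksEnv.COMPACTION_POOL));
  }
}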


@@ -0,0 +1,136 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* An iterator yields a sequence of key/value pairs from a source.
* The following class defines the interface. Multiple implementations
* are provided by this library. In particular, iterators are provided
* to access the contents of a Table or a DB.
*
 * Multiple threads can invoke const methods on a RocksIterator without
* external synchronization, but if any of the threads may call a
* non-const method, all threads accessing the same RocksIterator must use
* external synchronization.
*/
public class RocksIterator extends RocksObject {
public RocksIterator(long nativeHandle) {
super();
nativeHandle_ = nativeHandle;
}
/**
* An iterator is either positioned at a key/value pair, or
* not valid. This method returns true iff the iterator is valid.
* @return true if iterator is valid.
*/
public boolean isValid() {
assert(isInitialized());
return isValid0(nativeHandle_);
}
/**
* Position at the first key in the source. The iterator is Valid()
* after this call iff the source is not empty.
*/
public void seekToFirst() {
assert(isInitialized());
seekToFirst0(nativeHandle_);
}
/**
* Position at the last key in the source. The iterator is
* Valid() after this call iff the source is not empty.
*/
public void seekToLast() {
assert(isInitialized());
seekToLast0(nativeHandle_);
}
/**
* Moves to the next entry in the source. After this call, Valid() is
* true iff the iterator was not positioned at the last entry in the source.
* REQUIRES: Valid()
*/
public void next() {
assert(isInitialized());
next0(nativeHandle_);
}
/**
* Moves to the previous entry in the source. After this call, Valid() is
* true iff the iterator was not positioned at the first entry in source.
* REQUIRES: Valid()
*/
public void prev() {
assert(isInitialized());
prev0(nativeHandle_);
}
/**
* Return the key for the current entry. The underlying storage for
* the returned slice is valid only until the next modification of
* the iterator.
* REQUIRES: Valid()
* @return key for the current entry.
*/
public byte[] key() {
assert(isInitialized());
return key0(nativeHandle_);
}
/**
* Return the value for the current entry. The underlying storage for
* the returned slice is valid only until the next modification of
* the iterator.
* REQUIRES: !AtEnd() && !AtStart()
* @return value for the current entry.
*/
public byte[] value() {
assert(isInitialized());
return value0(nativeHandle_);
}
/**
 * Position at the first key in the source that is at or past target.
 * The iterator is Valid() after this call iff the source contains
 * an entry that comes at or past target.
 *
 * @param target the key to seek to.
*/
public void seek(byte[] target) {
assert(isInitialized());
seek0(nativeHandle_, target, target.length);
}
/**
* If an error has occurred, return it. Else return an ok status.
* If non-blocking IO is requested and this operation cannot be
* satisfied without doing some IO, then this returns Status::Incomplete().
*
*/
public void status() throws RocksDBException {
assert(isInitialized());
status0(nativeHandle_);
}
/**
* Deletes underlying C++ iterator pointer.
*/
@Override protected void disposeInternal() {
assert(isInitialized());
disposeInternal(nativeHandle_);
}
private native boolean isValid0(long handle);
private native void disposeInternal(long handle);
private native void seekToFirst0(long handle);
private native void seekToLast0(long handle);
private native void next0(long handle);
private native void prev0(long handle);
private native byte[] key0(long handle);
private native byte[] value0(long handle);
private native void seek0(long handle, byte[] target, int targetLen);
private native void status0(long handle);
}
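
A sketch of a full forward scan using the iterator above; the path and sample keys are hypothetical, and the iterator is disposed before the db is closed:

import org.rocksdb.*;

public class IteratorExample {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    Options options = new Options().setCreateIfMissing(true);
    RocksDB db = RocksDB.open(options, "/tmp/iterator_db");
    db.put("a".getBytes(), "1".getBytes());
    db.put("b".getBytes(), "2".getBytes());
    RocksIterator it = db.newIterator();
    for (it.seekToFirst(); it.isValid(); it.next()) {
      System.out.println(new String(it.key()) + " -> " + new String(it.value()));
    }
    it.status();   // throws RocksDBException if the scan hit an error
    it.dispose();  // dispose the iterator before closing the db
    db.close();
    options.dispose();
  }
}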


@@ -0,0 +1,117 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
 * RocksObject is the base-class of all RocksDB classes that have a pointer to
 * some c++ rocksdb object.
 *
 * RocksObject has a dispose() function, which releases its associated c++ resource.
 * This function can either be called manually or be invoked automatically
 * during the regular Java GC process. However, since Java may wrongly assume a
 * RocksObject only contains a long member variable and think it is small in size,
 * Java may give RocksObject low priority in the GC process. For this reason, it is
 * suggested to call dispose() manually. However, it is safe to let a RocksObject go
* out-of-scope without manually calling dispose() as dispose() will be called
* in the finalizer during the regular GC process.
*/
public abstract class RocksObject {
protected RocksObject() {
nativeHandle_ = 0;
owningHandle_ = true;
}
/**
* Release the c++ object manually pointed by the native handle.
*
* Note that dispose() will also be called during the GC process
* if it was not called before its RocksObject went out-of-scope.
 * However, since Java may wrongly assume those objects are
 * small in that they seem to only hold a long variable,
 * they might have low priority in the GC process. To prevent this,
* it is suggested to call dispose() manually.
*
* Note that once an instance of RocksObject has been disposed,
 * calling its functions will lead to undefined behavior.
*/
public final synchronized void dispose() {
if (isOwningNativeHandle() && isInitialized()) {
disposeInternal();
}
nativeHandle_ = 0;
disOwnNativeHandle();
}
/**
* The helper function of dispose() which all subclasses of RocksObject
* must implement to release their associated C++ resource.
*/
protected abstract void disposeInternal();
/**
* Revoke ownership of the native object.
*
* This will prevent the object from attempting to delete the underlying
* native object in its finalizer. This must be used when another object
* takes over ownership of the native object or both will attempt to delete
* the underlying object when garbage collected.
*
* When disOwnNativeHandle() is called, dispose() will simply set nativeHandle_
* to 0 without releasing its associated C++ resource. As a result,
 * incorrectly using this function may cause a memory leak, and this function call
* will not affect the return value of isInitialized().
*
* @see dispose()
* @see isInitialized()
*/
protected void disOwnNativeHandle() {
owningHandle_ = false;
}
/**
 * Returns true if the current RocksObject is responsible for releasing its
* native handle.
*
* @return true if the current RocksObject is responsible to release its
* native handle.
*
* @see disOwnNativeHandle()
* @see dispose()
*/
protected boolean isOwningNativeHandle() {
return owningHandle_;
}
/**
* Returns true if the associated native handle has been initialized.
*
* @return true if the associated native handle has been initialized.
*
* @see dispose()
*/
protected boolean isInitialized() {
return (nativeHandle_ != 0);
}
/**
 * Simply calls dispose() to release its c++ resource if it has not
 * yet been released.
*/
@Override protected void finalize() {
dispose();
}
/**
* A long variable holding c++ pointer pointing to some RocksDB C++ object.
*/
protected long nativeHandle_;
/**
* A flag indicating whether the current RocksObject is responsible to
* release the c++ object stored in its nativeHandle_.
*/
private boolean owningHandle_;
}
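
The ownership rules above boil down to a simple usage pattern: dispose native-backed objects deterministically instead of relying on the finalizer. A minimal sketch with a hypothetical path:

import org.rocksdb.*;

public class DisposeExample {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    Options options = new Options().setCreateIfMissing(true);
    RocksDB db = null;
    try {
      db = RocksDB.open(options, "/tmp/dispose_db");
      // ... use db ...
    } finally {
      if (db != null) {
        db.close();        // equivalent to dispose() for RocksDB
      }
      options.dispose();   // release the C++ Options now rather than waiting for GC
    }
  }
}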


@@ -0,0 +1,15 @@
package org.rocksdb;
/**
* The config for skip-list memtable representation.
*/
public class SkipListMemTableConfig extends MemTableConfig {
public SkipListMemTableConfig() {
}
@Override protected long newMemTableFactoryHandle() {
return newMemTableFactoryHandle0();
}
private native long newMemTableFactoryHandle0();
}


@@ -0,0 +1,38 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* Statistics to analyze the performance of a db. Pointer for statistics object
* is managed by Options class.
*/
public class Statistics {
private final long statsHandle_;
public Statistics(long statsHandle) {
statsHandle_ = statsHandle;
}
public long getTickerCount(TickerType tickerType) {
assert(isInitialized());
return getTickerCount0(tickerType.getValue(), statsHandle_);
}
public HistogramData geHistogramData(HistogramType histogramType) {
assert(isInitialized());
HistogramData hist = geHistogramData0(
histogramType.getValue(), statsHandle_);
return hist;
}
private boolean isInitialized() {
return (statsHandle_ != 0);
}
private native long getTickerCount0(int tickerType, long handle);
private native HistogramData geHistogramData0(int histogramType, long handle);
}


@@ -0,0 +1,107 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.Executors;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
/**
 * Helper class to collect DB statistics periodically at the interval specified
 * in the constructor. The callback function (provided in the constructor) is
 * called with every statistics collection.
 *
 * The caller should call start() to start statistics collection. shutDown()
 * should be called to stop stats collection, and must be called before the
 * Statistics references (provided in the constructor) have been disposed.
*/
public class StatisticsCollector {
private final List<StatsCollectorInput> _statsCollectorInputList;
private final ExecutorService _executorService;
private final int _statsCollectionInterval;
private volatile boolean _isRunning = true;
/**
* Constructor for statistics collector.
*
* @param statsCollectorInputList List of statistics collector input.
* @param statsCollectionIntervalInMilliSeconds Statistics collection time
* period (specified in milliseconds).
*/
public StatisticsCollector(List<StatsCollectorInput> statsCollectorInputList,
int statsCollectionIntervalInMilliSeconds) {
_statsCollectorInputList = statsCollectorInputList;
_statsCollectionInterval = statsCollectionIntervalInMilliSeconds;
_executorService = Executors.newSingleThreadExecutor();
}
public void start() {
_executorService.submit(collectStatistics());
}
/**
* Shuts down statistics collector.
*
* @param shutdownTimeout Time in milli-seconds to wait for shutdown before
* killing the collection process.
*/
public void shutDown(int shutdownTimeout) throws InterruptedException {
_isRunning = false;
_executorService.shutdownNow();
// Wait for collectStatistics runnable to finish so that disposal of
// statistics does not cause any exceptions to be thrown.
_executorService.awaitTermination(shutdownTimeout, TimeUnit.MILLISECONDS);
}
private Runnable collectStatistics() {
return new Runnable() {
@Override
public void run() {
while (_isRunning) {
try {
if(Thread.currentThread().isInterrupted()) {
break;
}
for(StatsCollectorInput statsCollectorInput :
_statsCollectorInputList) {
Statistics statistics = statsCollectorInput.getStatistics();
StatisticsCollectorCallback statsCallback =
statsCollectorInput.getCallback();
// Collect ticker data
for(TickerType ticker : TickerType.values()) {
long tickerValue = statistics.getTickerCount(ticker);
statsCallback.tickerCallback(ticker, tickerValue);
}
// Collect histogram data
for(HistogramType histogramType : HistogramType.values()) {
HistogramData histogramData =
statistics.geHistogramData(histogramType);
statsCallback.histogramCallback(histogramType, histogramData);
}
Thread.sleep(_statsCollectionInterval);
}
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
break;
}
catch (Exception e) {
throw new RuntimeException("Error while calculating statistics", e);
}
}
}
};
}
}


@@ -0,0 +1,34 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* Callback interface provided to StatisticsCollector.
*
* Thread safety:
* StatisticsCollector doesn't make any guarantees about thread safety.
* If the same reference of StatisticsCollectorCallback is passed to multiple
 * StatisticsCollector references, then it is the responsibility of the
* user to make StatisticsCollectorCallback's implementation thread-safe.
*/
public interface StatisticsCollectorCallback {
/**
* Callback function to get ticker values.
* @param tickerType Ticker type.
* @param tickerCount Value of ticker type.
*/
void tickerCallback(TickerType tickerType, long tickerCount);
/**
* Callback function to get histogram values.
* @param histType Histogram type.
* @param histData Histogram data.
*/
void histogramCallback(HistogramType histType, HistogramData histData);
}


@@ -0,0 +1,35 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* Contains all information necessary to collect statistics from one instance
* of DB statistics.
*/
public class StatsCollectorInput {
private final Statistics _statistics;
private final StatisticsCollectorCallback _statsCallback;
/**
* Constructor for StatsCollectorInput.
*
* @param statistics Reference of DB statistics.
* @param statsCallback Reference of statistics callback interface.
*/
public StatsCollectorInput(Statistics statistics,
StatisticsCollectorCallback statsCallback) {
_statistics = statistics;
_statsCallback = statsCallback;
}
public Statistics getStatistics() {
return _statistics;
}
public StatisticsCollectorCallback getCallback() {
return _statsCallback;
}
}

View File

@@ -0,0 +1,20 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* TableFormatConfig is used to configure the internal table format of a RocksDB.
* To make a RocksDB instance use a specific table format, its associated
* TableFormatConfig should be properly set and passed into Options via
* Options.setTableFormatConfig(), and the db opened using that Options.
*/
public abstract class TableFormatConfig {
/**
* This function should only be called by Options.setTableFormatConfig(),
* which will create a c++ shared-pointer to the c++ TableFactory
* that is associated with the Java TableFormatConfig.
*/
abstract protected long newTableFactoryHandle();
}
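
A sketch of how a concrete TableFormatConfig is wired into Options via Options.setTableFormatConfig(), assuming a subclass such as BlockBasedTableConfig exists in this snapshot (that class name is an assumption; any TableFormatConfig subclass would do):

import org.rocksdb.*;

public class TableFormatConfigUsage {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    Options opt = new Options().setCreateIfMissing(true);
    // BlockBasedTableConfig is assumed here; the config creates the
    // underlying c++ TableFactory when passed to Options.
    opt.setTableFormatConfig(new BlockBasedTableConfig());
    RocksDB db = RocksDB.open(opt, "/tmp/table_format_config_db");
    db.close();
    opt.dispose();
  }
}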

View File

@@ -0,0 +1,123 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
public enum TickerType {
// total block cache misses
// REQUIRES: BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS +
// BLOCK_CACHE_FILTER_MISS +
// BLOCK_CACHE_DATA_MISS;
BLOCK_CACHE_MISS(0),
// total block cache hit
// REQUIRES: BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT +
// BLOCK_CACHE_FILTER_HIT +
// BLOCK_CACHE_DATA_HIT;
BLOCK_CACHE_HIT(1),
// # of blocks added to block cache.
BLOCK_CACHE_ADD(2),
// # of times cache miss when accessing index block from block cache.
BLOCK_CACHE_INDEX_MISS(3),
// # of times cache hit when accessing index block from block cache.
BLOCK_CACHE_INDEX_HIT(4),
// # of times cache miss when accessing filter block from block cache.
BLOCK_CACHE_FILTER_MISS(5),
// # of times cache hit when accessing filter block from block cache.
BLOCK_CACHE_FILTER_HIT(6),
// # of times cache miss when accessing data block from block cache.
BLOCK_CACHE_DATA_MISS(7),
// # of times cache hit when accessing data block from block cache.
BLOCK_CACHE_DATA_HIT(8),
// # of times bloom filter has avoided file reads.
BLOOM_FILTER_USEFUL(9),
// # of memtable hits.
MEMTABLE_HIT(10),
// # of memtable misses.
MEMTABLE_MISS(11),
/**
* COMPACTION_KEY_DROP_* count the reasons for key drop during compaction
* There are 3 reasons currently.
*/
COMPACTION_KEY_DROP_NEWER_ENTRY(12), // key was written with a newer value.
COMPACTION_KEY_DROP_OBSOLETE(13), // The key is obsolete.
COMPACTION_KEY_DROP_USER(14), // user compaction function has dropped the key.
// Number of keys written to the database via the Put and Write calls.
NUMBER_KEYS_WRITTEN(15),
// Number of keys read.
NUMBER_KEYS_READ(16),
// Number of keys updated, if inplace update is enabled.
NUMBER_KEYS_UPDATED(17),
// Bytes written / read
BYTES_WRITTEN(18),
BYTES_READ(19),
NO_FILE_CLOSES(20),
NO_FILE_OPENS(21),
NO_FILE_ERRORS(22),
// Time system had to wait to do L0-L1 compactions
STALL_L0_SLOWDOWN_MICROS(23),
// Time system had to wait to move memtable to L1.
STALL_MEMTABLE_COMPACTION_MICROS(24),
// write throttle because of too many files in L0
STALL_L0_NUM_FILES_MICROS(25),
RATE_LIMIT_DELAY_MILLIS(26),
NO_ITERATORS(27), // number of iterators currently open
// Number of MultiGet calls, keys read, and bytes read
NUMBER_MULTIGET_CALLS(28),
NUMBER_MULTIGET_KEYS_READ(29),
NUMBER_MULTIGET_BYTES_READ(30),
// Number of delete records that were not required to be
// written to storage because key does not exist
NUMBER_FILTERED_DELETES(31),
NUMBER_MERGE_FAILURES(32),
SEQUENCE_NUMBER(33),
// number of times bloom was checked before creating iterator on a
// file, and the number of times the check was useful in avoiding
// iterator creation (and thus likely IOPs).
BLOOM_FILTER_PREFIX_CHECKED(34),
BLOOM_FILTER_PREFIX_USEFUL(35),
// Number of times we had to reseek inside an iteration to skip
// over large number of keys with same userkey.
NUMBER_OF_RESEEKS_IN_ITERATION(36),
// Record the number of calls to GetUpdatesSince. Useful to keep track of
// transaction log iterator refreshes
GET_UPDATES_SINCE_CALLS(37),
BLOCK_CACHE_COMPRESSED_MISS(38), // miss in the compressed block cache
BLOCK_CACHE_COMPRESSED_HIT(39), // hit in the compressed block cache
WAL_FILE_SYNCED(40), // Number of times WAL sync is done
WAL_FILE_BYTES(41), // Number of bytes written to WAL
// Writes can be processed by requesting thread or by the thread at the
// head of the writers queue.
WRITE_DONE_BY_SELF(42),
WRITE_DONE_BY_OTHER(43),
WRITE_WITH_WAL(44), // Number of Write calls that request WAL
COMPACT_READ_BYTES(45), // Bytes read during compaction
COMPACT_WRITE_BYTES(46), // Bytes written during compaction
// Number of table's properties loaded directly from file, without creating
// table reader object.
NUMBER_DIRECT_LOAD_TABLE_PROPERTIES(47),
NUMBER_SUPERVERSION_ACQUIRES(48),
NUMBER_SUPERVERSION_RELEASES(49),
NUMBER_SUPERVERSION_CLEANUPS(50);
private final int value_;
private TickerType(int value) {
value_ = value;
}
public int getValue() {
return value_;
}
}
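
Ticker values are read from a Statistics instance with getTickerCount(), exactly as the StatisticsCollector above does; a minimal sketch (the db path and the chosen tickers are illustrative):

import org.rocksdb.*;

public class TickerTypeUsage {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    Options opt = new Options().createStatistics().setCreateIfMissing(true);
    RocksDB db = RocksDB.open(opt, "/tmp/ticker_type_db");
    db.put("key".getBytes(), "value".getBytes());
    db.get("key".getBytes());
    Statistics stats = opt.statisticsPtr();
    // Each enum constant maps to a native counter via getValue().
    System.out.println("keys written: "
        + stats.getTickerCount(TickerType.NUMBER_KEYS_WRITTEN));
    System.out.println("memtable hits: "
        + stats.getTickerCount(TickerType.MEMTABLE_HIT));
    db.close();
    opt.dispose();
  }
}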

View File

@@ -0,0 +1,40 @@
package org.rocksdb;
/**
* The config for vector memtable representation.
*/
public class VectorMemTableConfig extends MemTableConfig {
public static final int DEFAULT_RESERVED_SIZE = 0;
public VectorMemTableConfig() {
reservedSize_ = DEFAULT_RESERVED_SIZE;
}
/**
* Set the initial size of the vector that will be used
* by the memtable created based on this config.
*
* @param size the initial size of the vector.
* @return the reference to the current config.
*/
public VectorMemTableConfig setReservedSize(int size) {
reservedSize_ = size;
return this;
}
/**
* Returns the initial size of the vector used by the memtable
* created based on this config.
*
* @return the initial size of the vector.
*/
public int reservedSize() {
return reservedSize_;
}
@Override protected long newMemTableFactoryHandle() {
return newMemTableFactoryHandle(reservedSize_);
}
private native long newMemTableFactoryHandle(long reservedSize);
private int reservedSize_;
}
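
A sketch of selecting the vector memtable for a database, assuming Options exposes a setMemTableConfig(MemTableConfig) setter in this snapshot (the setter name and the reserved size are assumptions):

import org.rocksdb.*;

public class VectorMemTableUsage {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    Options opt = new Options().setCreateIfMissing(true);
    // Reserve room for roughly 10000 entries up front (illustrative value).
    opt.setMemTableConfig(new VectorMemTableConfig().setReservedSize(10000));
    RocksDB db = RocksDB.open(opt, "/tmp/vector_memtable_db");
    db.close();
    opt.dispose();
  }
}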

View File

@@ -0,0 +1,112 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
import java.util.*;
/**
* WriteBatch holds a collection of updates to apply atomically to a DB.
*
* The updates are applied in the order in which they are added
* to the WriteBatch. For example, the value of "key" will be "v3"
* after the following batch is written:
*
* batch.put("key", "v1");
* batch.remove("key");
* batch.put("key", "v2");
* batch.put("key", "v3");
*
* Multiple threads can invoke const methods on a WriteBatch without
* external synchronization, but if any of the threads may call a
* non-const method, all threads accessing the same WriteBatch must use
* external synchronization.
*/
public class WriteBatch extends RocksObject {
public WriteBatch() {
super();
newWriteBatch(0);
}
public WriteBatch(int reserved_bytes) {
nativeHandle_ = 0;
newWriteBatch(reserved_bytes);
}
/**
* Returns the number of updates in the batch.
*/
public native int count();
/**
* Store the mapping "key->value" in the database.
*/
public void put(byte[] key, byte[] value) {
put(key, key.length, value, value.length);
}
/**
* Merge "value" with the existing value of "key" in the database.
* "key->merge(existing, value)"
*/
public void merge(byte[] key, byte[] value) {
merge(key, key.length, value, value.length);
}
/**
* If the database contains a mapping for "key", erase it. Else do nothing.
*/
public void remove(byte[] key) {
remove(key, key.length);
}
/**
* Append a blob of arbitrary size to the records in this batch. The blob will
* be stored in the transaction log but not in any other file. In particular,
* it will not be persisted to the SST files. When iterating over this
* WriteBatch, WriteBatch::Handler::LogData will be called with the contents
* of the blob as it is encountered. Blobs, puts, deletes, and merges will be
* encountered in the same order in which they were inserted. The blob will
* NOT consume sequence number(s) and will NOT increase the count of the batch.
*
* Example application: add timestamps to the transaction log for use in
* replication.
*/
public void putLogData(byte[] blob) {
putLogData(blob, blob.length);
}
/**
* Clear all updates buffered in this batch
*/
public native void clear();
/**
* Delete the c++ side pointer.
*/
@Override protected void disposeInternal() {
assert(isInitialized());
disposeInternal(nativeHandle_);
}
private native void newWriteBatch(int reserved_bytes);
private native void put(byte[] key, int keyLen,
byte[] value, int valueLen);
private native void merge(byte[] key, int keyLen,
byte[] value, int valueLen);
private native void remove(byte[] key, int keyLen);
private native void putLogData(byte[] blob, int blobLen);
private native void disposeInternal(long handle);
}
/**
* Package-private class which provides a Java API to access the
* c++ WriteBatchInternal.
*/
class WriteBatchInternal {
static native void setSequence(WriteBatch batch, long sn);
static native long sequence(WriteBatch batch);
static native void append(WriteBatch b1, WriteBatch b2);
}
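
A sketch of building a WriteBatch and applying it atomically, assuming RocksDB exposes a write(WriteOptions, WriteBatch) method as in the Java API of this period (path and keys are illustrative):

import org.rocksdb.*;

public class WriteBatchUsage {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    Options opt = new Options().setCreateIfMissing(true);
    RocksDB db = RocksDB.open(opt, "/tmp/write_batch_usage_db");
    WriteOptions writeOpt = new WriteOptions();
    WriteBatch batch = new WriteBatch();
    // The three updates below are applied in insertion order and become
    // visible atomically, so "key" ends up mapped to "v2".
    batch.put("key".getBytes(), "v1".getBytes());
    batch.remove("key".getBytes());
    batch.put("key".getBytes(), "v2".getBytes());
    db.write(writeOpt, batch);  // assumed RocksDB.write(WriteOptions, WriteBatch)
    batch.dispose();
    writeOpt.dispose();
    db.close();
    opt.dispose();
  }
}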

View File

@@ -0,0 +1,124 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
package org.rocksdb;
import java.util.*;
import java.io.UnsupportedEncodingException;
/**
* This class mimics the db/write_batch_test.cc in the c++ rocksdb library.
*/
public class WriteBatchTest {
static {
RocksDB.loadLibrary();
}
public static void main(String args[]) {
System.out.println("Testing WriteBatchTest.Empty ===");
Empty();
System.out.println("Testing WriteBatchTest.Multiple ===");
Multiple();
System.out.println("Testing WriteBatchTest.Append ===");
Append();
System.out.println("Testing WriteBatchTest.Blob ===");
Blob();
// The following tests have not yet been ported.
// Continue();
// PutGatherSlices();
System.out.println("Passed all WriteBatchTest!");
}
static void Empty() {
WriteBatch batch = new WriteBatch();
assert(batch.count() == 0);
}
static void Multiple() {
try {
WriteBatch batch = new WriteBatch();
batch.put("foo".getBytes("US-ASCII"), "bar".getBytes("US-ASCII"));
batch.remove("box".getBytes("US-ASCII"));
batch.put("baz".getBytes("US-ASCII"), "boo".getBytes("US-ASCII"));
WriteBatchInternal.setSequence(batch, 100);
assert(100 == WriteBatchInternal.sequence(batch));
assert(3 == batch.count());
assert(new String("Put(baz, boo)@102" +
"Delete(box)@101" +
"Put(foo, bar)@100")
.equals(new String(getContents(batch), "US-ASCII")));
} catch (UnsupportedEncodingException e) {
System.err.println(e);
assert(false);
}
}
static void Append() {
WriteBatch b1 = new WriteBatch();
WriteBatch b2 = new WriteBatch();
WriteBatchInternal.setSequence(b1, 200);
WriteBatchInternal.setSequence(b2, 300);
WriteBatchInternal.append(b1, b2);
assert(getContents(b1).length == 0);
assert(b1.count() == 0);
try {
b2.put("a".getBytes("US-ASCII"), "va".getBytes("US-ASCII"));
WriteBatchInternal.append(b1, b2);
assert("Put(a, va)@200".equals(new String(getContents(b1), "US-ASCII")));
assert(1 == b1.count());
b2.clear();
b2.put("b".getBytes("US-ASCII"), "vb".getBytes("US-ASCII"));
WriteBatchInternal.append(b1, b2);
assert(new String("Put(a, va)@200" +
"Put(b, vb)@201")
.equals(new String(getContents(b1), "US-ASCII")));
assert(2 == b1.count());
b2.remove("foo".getBytes("US-ASCII"));
WriteBatchInternal.append(b1, b2);
assert(new String("Put(a, va)@200" +
"Put(b, vb)@202" +
"Put(b, vb)@201" +
"Delete(foo)@203")
.equals(new String(getContents(b1), "US-ASCII")));
assert(4 == b1.count());
} catch (UnsupportedEncodingException e) {
System.err.println(e);
assert(false);
}
}
static void Blob() {
WriteBatch batch = new WriteBatch();
try {
batch.put("k1".getBytes("US-ASCII"), "v1".getBytes("US-ASCII"));
batch.put("k2".getBytes("US-ASCII"), "v2".getBytes("US-ASCII"));
batch.put("k3".getBytes("US-ASCII"), "v3".getBytes("US-ASCII"));
batch.putLogData("blob1".getBytes("US-ASCII"));
batch.remove("k2".getBytes("US-ASCII"));
batch.putLogData("blob2".getBytes("US-ASCII"));
batch.merge("foo".getBytes("US-ASCII"), "bar".getBytes("US-ASCII"));
assert(5 == batch.count());
assert(new String("Merge(foo, bar)@4" +
"Put(k1, v1)@0" +
"Delete(k2)@3" +
"Put(k2, v2)@1" +
"Put(k3, v3)@2")
.equals(new String(getContents(batch), "US-ASCII")));
} catch (UnsupportedEncodingException e) {
System.err.println(e);
assert(false);
}
}
static native byte[] getContents(WriteBatch batch);
}

View File

@@ -0,0 +1,99 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* Options that control write operations.
*
* Note that developers should call WriteOptions.dispose() to release the
* c++ side memory before a WriteOptions instance runs out of scope.
*/
public class WriteOptions extends RocksObject {
public WriteOptions() {
super();
newWriteOptions();
}
@Override protected void disposeInternal() {
assert(isInitialized());
disposeInternal(nativeHandle_);
}
/**
* If true, the write will be flushed from the operating system
* buffer cache (by calling WritableFile::Sync()) before the write
* is considered complete. If this flag is true, writes will be
* slower.
*
* If this flag is false, and the machine crashes, some recent
* writes may be lost. Note that if it is just the process that
* crashes (i.e., the machine does not reboot), no writes will be
* lost even if sync==false.
*
* In other words, a DB write with sync==false has similar
* crash semantics as the "write()" system call. A DB write
* with sync==true has similar crash semantics to a "write()"
* system call followed by "fdatasync()".
*
* Default: false
*
* @param flag a boolean flag to indicate whether a write
* should be synchronized.
* @return the instance of the current WriteOptions.
*/
public WriteOptions setSync(boolean flag) {
setSync(nativeHandle_, flag);
return this;
}
/**
* If true, the write will be flushed from the operating system
* buffer cache (by calling WritableFile::Sync()) before the write
* is considered complete. If this flag is true, writes will be
* slower.
*
* If this flag is false, and the machine crashes, some recent
* writes may be lost. Note that if it is just the process that
* crashes (i.e., the machine does not reboot), no writes will be
* lost even if sync==false.
*
* In other words, a DB write with sync==false has similar
* crash semantics as the "write()" system call. A DB write
* with sync==true has similar crash semantics to a "write()"
* system call followed by "fdatasync()".
*/
public boolean sync() {
return sync(nativeHandle_);
}
/**
* If true, writes will not first go to the write ahead log,
* and the write may be lost after a crash.
*
* @param flag a boolean flag to specify whether to disable
* write-ahead-log on writes.
* @return the instance of the current WriteOptions.
*/
public WriteOptions setDisableWAL(boolean flag) {
setDisableWAL(nativeHandle_, flag);
return this;
}
/**
* If true, writes will not first go to the write ahead log,
* and the write may be lost after a crash.
*/
public boolean disableWAL() {
return disableWAL(nativeHandle_);
}
private native void newWriteOptions();
private native void setSync(long handle, boolean flag);
private native boolean sync(long handle);
private native void setDisableWAL(long handle, boolean flag);
private native boolean disableWAL(long handle);
private native void disposeInternal(long handle);
}
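
The sync and disableWAL flags trade durability for write latency; a small sketch showing both settings, assuming RocksDB exposes put(WriteOptions, byte[], byte[]) overloads (path and keys are illustrative):

import org.rocksdb.*;

public class WriteOptionsUsage {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    Options opt = new Options().setCreateIfMissing(true);
    RocksDB db = RocksDB.open(opt, "/tmp/write_options_usage_db");
    // Durable write: synced to storage before the call returns.
    WriteOptions durable = new WriteOptions().setSync(true);
    // Fast write: skips the write-ahead log, may be lost after a crash.
    WriteOptions fast = new WriteOptions().setDisableWAL(true);
    db.put(durable, "balance".getBytes(), "100".getBytes());
    db.put(fast, "cache-entry".getBytes(), "tmp".getBytes());
    durable.dispose();
    fast.dispose();
    db.close();
    opt.dispose();
  }
}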

File diff suppressed because it is too large

View File

@@ -0,0 +1,63 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb.test;
import org.rocksdb.*;
public class BackupableDBTest {
static final String db_path = "/tmp/backupablejni_db";
static final String backup_path = "/tmp/backupablejni_db_backup";
static {
RocksDB.loadLibrary();
}
public static void main(String[] args) {
Options opt = new Options();
opt.setCreateIfMissing(true);
BackupableDBOptions bopt = new BackupableDBOptions(backup_path, false,
true, false, true, 0, 0);
BackupableDB bdb = null;
try {
bdb = BackupableDB.open(opt, bopt, db_path);
bdb.put("abc".getBytes(), "def".getBytes());
bdb.put("ghi".getBytes(), "jkl".getBytes());
bdb.createNewBackup(true);
// delete record after backup
bdb.remove("abc".getBytes());
byte[] value = bdb.get("abc".getBytes());
assert(value == null);
bdb.close();
// restore from backup
RestoreOptions ropt = new RestoreOptions(false);
RestoreBackupableDB rdb = new RestoreBackupableDB(bopt);
rdb.restoreDBFromLatestBackup(db_path, db_path,
ropt);
rdb.dispose();
ropt.dispose();
// verify that backed up data contains deleted record
bdb = BackupableDB.open(opt, bopt, db_path);
value = bdb.get("abc".getBytes());
assert(new String(value).equals("def"));
System.out.println("Backup and restore test passed");
} catch (RocksDBException e) {
System.err.format("[ERROR]: %s%n", e);
e.printStackTrace();
} finally {
opt.dispose();
bopt.dispose();
if (bdb != null) {
bdb.close();
}
}
}
}

View File

@@ -0,0 +1,424 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb.test;
import java.util.Random;
import org.rocksdb.RocksDB;
import org.rocksdb.Options;
public class OptionsTest {
static {
RocksDB.loadLibrary();
}
public static void main(String[] args) {
Options opt = new Options();
Random rand = new Random();
{ // CreateIfMissing test
boolean boolValue = rand.nextBoolean();
opt.setCreateIfMissing(boolValue);
assert(opt.createIfMissing() == boolValue);
}
{ // ErrorIfExists test
boolean boolValue = rand.nextBoolean();
opt.setErrorIfExists(boolValue);
assert(opt.errorIfExists() == boolValue);
}
{ // ParanoidChecks test
boolean boolValue = rand.nextBoolean();
opt.setParanoidChecks(boolValue);
assert(opt.paranoidChecks() == boolValue);
}
{ // MaxOpenFiles test
int intValue = rand.nextInt();
opt.setMaxOpenFiles(intValue);
assert(opt.maxOpenFiles() == intValue);
}
{ // DisableDataSync test
boolean boolValue = rand.nextBoolean();
opt.setDisableDataSync(boolValue);
assert(opt.disableDataSync() == boolValue);
}
{ // UseFsync test
boolean boolValue = rand.nextBoolean();
opt.setUseFsync(boolValue);
assert(opt.useFsync() == boolValue);
}
{ // DbStatsLogInterval test
int intValue = rand.nextInt();
opt.setDbStatsLogInterval(intValue);
assert(opt.dbStatsLogInterval() == intValue);
}
{ // DbLogDir test
String str = "path/to/DbLogDir";
opt.setDbLogDir(str);
assert(opt.dbLogDir().equals(str));
}
{ // WalDir test
String str = "path/to/WalDir";
opt.setWalDir(str);
assert(opt.walDir().equals(str));
}
{ // DeleteObsoleteFilesPeriodMicros test
long longValue = rand.nextLong();
opt.setDeleteObsoleteFilesPeriodMicros(longValue);
assert(opt.deleteObsoleteFilesPeriodMicros() == longValue);
}
{ // MaxBackgroundCompactions test
int intValue = rand.nextInt();
opt.setMaxBackgroundCompactions(intValue);
assert(opt.maxBackgroundCompactions() == intValue);
}
{ // MaxBackgroundFlushes test
int intValue = rand.nextInt();
opt.setMaxBackgroundFlushes(intValue);
assert(opt.maxBackgroundFlushes() == intValue);
}
{ // MaxLogFileSize test
long longValue = rand.nextLong();
opt.setMaxLogFileSize(longValue);
assert(opt.maxLogFileSize() == longValue);
}
{ // LogFileTimeToRoll test
long longValue = rand.nextLong();
opt.setLogFileTimeToRoll(longValue);
assert(opt.logFileTimeToRoll() == longValue);
}
{ // KeepLogFileNum test
long longValue = rand.nextLong();
opt.setKeepLogFileNum(longValue);
assert(opt.keepLogFileNum() == longValue);
}
{ // MaxManifestFileSize test
long longValue = rand.nextLong();
opt.setMaxManifestFileSize(longValue);
assert(opt.maxManifestFileSize() == longValue);
}
{ // TableCacheNumshardbits test
int intValue = rand.nextInt();
opt.setTableCacheNumshardbits(intValue);
assert(opt.tableCacheNumshardbits() == intValue);
}
{ // TableCacheRemoveScanCountLimit test
int intValue = rand.nextInt();
opt.setTableCacheRemoveScanCountLimit(intValue);
assert(opt.tableCacheRemoveScanCountLimit() == intValue);
}
{ // WalTtlSeconds test
long longValue = rand.nextLong();
opt.setWalTtlSeconds(longValue);
assert(opt.walTtlSeconds() == longValue);
}
{ // ManifestPreallocationSize test
long longValue = rand.nextLong();
opt.setManifestPreallocationSize(longValue);
assert(opt.manifestPreallocationSize() == longValue);
}
{ // AllowOsBuffer test
boolean boolValue = rand.nextBoolean();
opt.setAllowOsBuffer(boolValue);
assert(opt.allowOsBuffer() == boolValue);
}
{ // AllowMmapReads test
boolean boolValue = rand.nextBoolean();
opt.setAllowMmapReads(boolValue);
assert(opt.allowMmapReads() == boolValue);
}
{ // AllowMmapWrites test
boolean boolValue = rand.nextBoolean();
opt.setAllowMmapWrites(boolValue);
assert(opt.allowMmapWrites() == boolValue);
}
{ // IsFdCloseOnExec test
boolean boolValue = rand.nextBoolean();
opt.setIsFdCloseOnExec(boolValue);
assert(opt.isFdCloseOnExec() == boolValue);
}
{ // SkipLogErrorOnRecovery test
boolean boolValue = rand.nextBoolean();
opt.setSkipLogErrorOnRecovery(boolValue);
assert(opt.skipLogErrorOnRecovery() == boolValue);
}
{ // StatsDumpPeriodSec test
int intValue = rand.nextInt();
opt.setStatsDumpPeriodSec(intValue);
assert(opt.statsDumpPeriodSec() == intValue);
}
{ // AdviseRandomOnOpen test
boolean boolValue = rand.nextBoolean();
opt.setAdviseRandomOnOpen(boolValue);
assert(opt.adviseRandomOnOpen() == boolValue);
}
{ // UseAdaptiveMutex test
boolean boolValue = rand.nextBoolean();
opt.setUseAdaptiveMutex(boolValue);
assert(opt.useAdaptiveMutex() == boolValue);
}
{ // BytesPerSync test
long longValue = rand.nextLong();
opt.setBytesPerSync(longValue);
assert(opt.bytesPerSync() == longValue);
}
{ // AllowThreadLocal test
boolean boolValue = rand.nextBoolean();
opt.setAllowThreadLocal(boolValue);
assert(opt.allowThreadLocal() == boolValue);
}
{ // WriteBufferSize test
long longValue = rand.nextLong();
opt.setWriteBufferSize(longValue);
assert(opt.writeBufferSize() == longValue);
}
{ // MaxWriteBufferNumber test
int intValue = rand.nextInt();
opt.setMaxWriteBufferNumber(intValue);
assert(opt.maxWriteBufferNumber() == intValue);
}
{ // MinWriteBufferNumberToMerge test
int intValue = rand.nextInt();
opt.setMinWriteBufferNumberToMerge(intValue);
assert(opt.minWriteBufferNumberToMerge() == intValue);
}
{ // BlockSize test
long longValue = rand.nextLong();
opt.setBlockSize(longValue);
assert(opt.blockSize() == longValue);
}
{ // BlockRestartInterval test
int intValue = rand.nextInt();
opt.setBlockRestartInterval(intValue);
assert(opt.blockRestartInterval() == intValue);
}
{ // WholeKeyFiltering test
boolean boolValue = rand.nextBoolean();
opt.setWholeKeyFiltering(boolValue);
assert(opt.wholeKeyFiltering() == boolValue);
}
{ // NumLevels test
int intValue = rand.nextInt();
opt.setNumLevels(intValue);
assert(opt.numLevels() == intValue);
}
{ // LevelFileNumCompactionTrigger test
int intValue = rand.nextInt();
opt.setLevelZeroFileNumCompactionTrigger(intValue);
assert(opt.levelZeroFileNumCompactionTrigger() == intValue);
}
{ // LevelSlowdownWritesTrigger test
int intValue = rand.nextInt();
opt.setLevelZeroSlowdownWritesTrigger(intValue);
assert(opt.levelZeroSlowdownWritesTrigger() == intValue);
}
{ // LevelStopWritesTrigger test
int intValue = rand.nextInt();
opt.setLevelZeroStopWritesTrigger(intValue);
assert(opt.levelZeroStopWritesTrigger() == intValue);
}
{ // MaxMemCompactionLevel test
int intValue = rand.nextInt();
opt.setMaxMemCompactionLevel(intValue);
assert(opt.maxMemCompactionLevel() == intValue);
}
{ // TargetFileSizeBase test
int intValue = rand.nextInt();
opt.setTargetFileSizeBase(intValue);
assert(opt.targetFileSizeBase() == intValue);
}
{ // TargetFileSizeMultiplier test
int intValue = rand.nextInt();
opt.setTargetFileSizeMultiplier(intValue);
assert(opt.targetFileSizeMultiplier() == intValue);
}
{ // MaxBytesForLevelBase test
long longValue = rand.nextLong();
opt.setMaxBytesForLevelBase(longValue);
assert(opt.maxBytesForLevelBase() == longValue);
}
{ // MaxBytesForLevelMultiplier test
int intValue = rand.nextInt();
opt.setMaxBytesForLevelMultiplier(intValue);
assert(opt.maxBytesForLevelMultiplier() == intValue);
}
{ // ExpandedCompactionFactor test
int intValue = rand.nextInt();
opt.setExpandedCompactionFactor(intValue);
assert(opt.expandedCompactionFactor() == intValue);
}
{ // SourceCompactionFactor test
int intValue = rand.nextInt();
opt.setSourceCompactionFactor(intValue);
assert(opt.sourceCompactionFactor() == intValue);
}
{ // MaxGrandparentOverlapFactor test
int intValue = rand.nextInt();
opt.setMaxGrandparentOverlapFactor(intValue);
assert(opt.maxGrandparentOverlapFactor() == intValue);
}
{ // DisableSeekCompaction test
boolean boolValue = rand.nextBoolean();
opt.setDisableSeekCompaction(boolValue);
assert(opt.disableSeekCompaction() == boolValue);
}
{ // SoftRateLimit test
double doubleValue = rand.nextDouble();
opt.setSoftRateLimit(doubleValue);
assert(opt.softRateLimit() == doubleValue);
}
{ // HardRateLimit test
double doubleValue = rand.nextDouble();
opt.setHardRateLimit(doubleValue);
assert(opt.hardRateLimit() == doubleValue);
}
{ // RateLimitDelayMaxMilliseconds test
int intValue = rand.nextInt();
opt.setRateLimitDelayMaxMilliseconds(intValue);
assert(opt.rateLimitDelayMaxMilliseconds() == intValue);
}
{ // NoBlockCache test
boolean boolValue = rand.nextBoolean();
opt.setNoBlockCache(boolValue);
assert(opt.noBlockCache() == boolValue);
}
{ // ArenaBlockSize test
long longValue = rand.nextLong();
opt.setArenaBlockSize(longValue);
assert(opt.arenaBlockSize() == longValue);
}
{ // DisableAutoCompactions test
boolean boolValue = rand.nextBoolean();
opt.setDisableAutoCompactions(boolValue);
assert(opt.disableAutoCompactions() == boolValue);
}
{ // PurgeRedundantKvsWhileFlush test
boolean boolValue = rand.nextBoolean();
opt.setPurgeRedundantKvsWhileFlush(boolValue);
assert(opt.purgeRedundantKvsWhileFlush() == boolValue);
}
{ // BlockSizeDeviation test
int intValue = rand.nextInt();
opt.setBlockSizeDeviation(intValue);
assert(opt.blockSizeDeviation() == intValue);
}
{ // VerifyChecksumsInCompaction test
boolean boolValue = rand.nextBoolean();
opt.setVerifyChecksumsInCompaction(boolValue);
assert(opt.verifyChecksumsInCompaction() == boolValue);
}
{ // FilterDeletes test
boolean boolValue = rand.nextBoolean();
opt.setFilterDeletes(boolValue);
assert(opt.filterDeletes() == boolValue);
}
{ // MaxSequentialSkipInIterations test
long longValue = rand.nextLong();
opt.setMaxSequentialSkipInIterations(longValue);
assert(opt.maxSequentialSkipInIterations() == longValue);
}
{ // InplaceUpdateSupport test
boolean boolValue = rand.nextBoolean();
opt.setInplaceUpdateSupport(boolValue);
assert(opt.inplaceUpdateSupport() == boolValue);
}
{ // InplaceUpdateNumLocks test
long longValue = rand.nextLong();
opt.setInplaceUpdateNumLocks(longValue);
assert(opt.inplaceUpdateNumLocks() == longValue);
}
{ // MemtablePrefixBloomBits test
int intValue = rand.nextInt();
opt.setMemtablePrefixBloomBits(intValue);
assert(opt.memtablePrefixBloomBits() == intValue);
}
{ // MemtablePrefixBloomProbes test
int intValue = rand.nextInt();
opt.setMemtablePrefixBloomProbes(intValue);
assert(opt.memtablePrefixBloomProbes() == intValue);
}
{ // BloomLocality test
int intValue = rand.nextInt();
opt.setBloomLocality(intValue);
assert(opt.bloomLocality() == intValue);
}
{ // MaxSuccessiveMerges test
long longValue = rand.nextLong();
opt.setMaxSuccessiveMerges(longValue);
assert(opt.maxSuccessiveMerges() == longValue);
}
{ // MinPartialMergeOperands test
int intValue = rand.nextInt();
opt.setMinPartialMergeOperands(intValue);
assert(opt.minPartialMergeOperands() == intValue);
}
opt.dispose();
System.out.println("Passed OptionsTest");
}
}

View File

@@ -0,0 +1,40 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb.test;
import java.util.Random;
import org.rocksdb.RocksDB;
import org.rocksdb.ReadOptions;
public class ReadOptionsTest {
static {
RocksDB.loadLibrary();
}
public static void main(String[] args) {
ReadOptions opt = new ReadOptions();
Random rand = new Random();
{ // VerifyChecksums test
boolean boolValue = rand.nextBoolean();
opt.setVerifyChecksums(boolValue);
assert(opt.verifyChecksums() == boolValue);
}
{ // FillCache test
boolean boolValue = rand.nextBoolean();
opt.setFillCache(boolValue);
assert(opt.fillCache() == boolValue);
}
{ // Tailing test
boolean boolValue = rand.nextBoolean();
opt.setTailing(boolValue);
assert(opt.tailing() == boolValue);
}
opt.dispose();
System.out.println("Passed ReadOptionsTest");
}
}

View File

@@ -0,0 +1,43 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb.test;
import java.util.Collections;
import org.rocksdb.*;
public class StatisticsCollectorTest {
static final String db_path = "/tmp/backupablejni_db";
static {
RocksDB.loadLibrary();
}
public static void main(String[] args)
throws InterruptedException, RocksDBException {
Options opt = new Options().createStatistics().setCreateIfMissing(true);
Statistics stats = opt.statisticsPtr();
RocksDB db = RocksDB.open(opt, db_path);
StatsCallbackMock callback = new StatsCallbackMock();
StatsCollectorInput statsInput = new StatsCollectorInput(stats, callback);
StatisticsCollector statsCollector = new StatisticsCollector(
Collections.singletonList(statsInput), 100);
statsCollector.start();
Thread.sleep(1000);
assert(callback.tickerCallbackCount > 0);
assert(callback.histCallbackCount > 0);
statsCollector.shutDown(1000);
db.close();
opt.dispose();
System.out.println("Stats collector test passed.!");
}
}

View File

@@ -0,0 +1,22 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb.test;
import org.rocksdb.*;
public class StatsCallbackMock implements StatisticsCollectorCallback {
public int tickerCallbackCount = 0;
public int histCallbackCount = 0;
public void tickerCallback(TickerType tickerType, long tickerCount) {
tickerCallbackCount++;
}
public void histogramCallback(HistogramType histType,
HistogramData histData) {
histCallbackCount++;
}
}

View File

@@ -0,0 +1,37 @@
package org.rocksdb.util;
public class Environment {
private static String OS = System.getProperty("os.name").toLowerCase();
public static boolean isWindows() {
return (OS.indexOf("win") >= 0);
}
public static boolean isMac() {
return (OS.indexOf("mac") >= 0);
}
public static boolean isUnix() {
return (OS.indexOf("nix") >= 0 ||
OS.indexOf("nux") >= 0 ||
OS.indexOf("aix") >= 0);
}
public static String getSharedLibraryName(String name) {
if (isUnix()) {
return String.format("lib%s.so", name);
} else if (isMac()) {
return String.format("lib%s.dylib", name);
}
throw new UnsupportedOperationException();
}
public static String getJniLibraryName(String name) {
if (isUnix()) {
return String.format("lib%s.so", name);
} else if (isMac()) {
return String.format("lib%s.jnilib", name);
}
throw new UnsupportedOperationException();
}
}
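
The helper only maps the platform to a native library file name; calling it directly looks like this:

import org.rocksdb.util.Environment;

public class EnvironmentUsage {
  public static void main(String[] args) {
    // Prints librocksdbjni.so on Linux and librocksdbjni.jnilib on Mac.
    System.out.println(Environment.getJniLibraryName("rocksdbjni"));
    // Prints librocksdb.so on Linux and librocksdb.dylib on Mac.
    System.out.println(Environment.getSharedLibraryName("rocksdb"));
  }
}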

View File

@@ -0,0 +1,16 @@
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb.util;
public class SizeUnit {
public static final long KB = 1024L;
public static final long MB = KB * KB;
public static final long GB = KB * MB;
public static final long TB = KB * GB;
public static final long PB = KB * TB;
private SizeUnit() {}
}