Mirror of https://github.com/XRPLF/rippled.git, synced 2025-12-06 17:27:55 +00:00
Merge branch 'master' into columnfamilies
@@ -311,6 +311,7 @@ extern void rocksdb_cache_destroy(rocksdb_cache_t* cache);

extern rocksdb_env_t* rocksdb_create_default_env();
extern void rocksdb_env_set_background_threads(rocksdb_env_t* env, int n);
extern void rocksdb_env_set_high_priority_background_threads(rocksdb_env_t* env, int n);
extern void rocksdb_env_destroy(rocksdb_env_t*);

/* Universal Compaction options */
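For orientation, a minimal sketch of how these C-API env hooks are typically wired into an options object before opening a database. The rocksdb_options_create / rocksdb_options_set_env / rocksdb_options_destroy calls belong to the same C API but do not appear in this hunk; the include path and thread counts are assumptions.

#include <rocksdb/c.h>   /* include path may differ in a vendored tree */

int main(void) {
  rocksdb_env_t* env = rocksdb_create_default_env();
  rocksdb_env_set_background_threads(env, 4);                /* low-priority pool (compactions) */
  rocksdb_env_set_high_priority_background_threads(env, 1);  /* high-priority pool (flushes) */

  rocksdb_options_t* options = rocksdb_options_create();
  rocksdb_options_set_env(options, env);
  /* ... open the DB with rocksdb_open(options, "/tmp/testdb", &err) ... */

  rocksdb_options_destroy(options);
  rocksdb_env_destroy(env);
  return 0;
}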
@@ -44,15 +44,15 @@ using std::shared_ptr;
enum CompressionType : char {
  // NOTE: do not change the values of existing entries, as these are
  // part of the persistent format on disk.
-  kNoCompression = 0x0,
+  kNoCompression = 0x0,
  kSnappyCompression = 0x1,
  kZlibCompression = 0x2,
  kBZip2Compression = 0x3
};

enum CompactionStyle : char {
-  kCompactionStyleLevel = 0x0,     // level based compaction style
-  kCompactionStyleUniversal = 0x1  // Universal compaction style
+  kCompactionStyleLevel = 0x0,     // level based compaction style
+  kCompactionStyleUniversal = 0x1  // Universal compaction style
};

// Compression options for different compression algorithms like Zlib
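As a hedged illustration of how these two enums are consumed, a small sketch that opens a database with Snappy compression and universal compaction. The include paths, database path, and use of create_if_missing are assumptions; the enum values come from the hunk above.

#include "rocksdb/db.h"
#include "rocksdb/options.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.compression = rocksdb::kSnappyCompression;              // per-block compression
  options.compaction_style = rocksdb::kCompactionStyleUniversal;  // instead of level-based

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/testdb", &db);
  if (s.ok()) delete db;
  return s.ok() ? 0 : 1;
}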
@@ -60,12 +60,9 @@ struct CompressionOptions {
  int window_bits;
  int level;
  int strategy;
-  CompressionOptions():window_bits(-14),
-      level(-1),
-      strategy(0){}
-  CompressionOptions(int wbits, int lev, int strategy):window_bits(wbits),
-      level(lev),
-      strategy(strategy){}
+  CompressionOptions() : window_bits(-14), level(-1), strategy(0) {}
+  CompressionOptions(int wbits, int lev, int strategy)
+      : window_bits(wbits), level(lev), strategy(strategy) {}
};

struct Options;
@@ -180,7 +177,6 @@ struct ColumnFamilyOptions {
  // Default: 16
  int block_restart_interval;

-
  // Compress blocks using the specified compression algorithm. This
  // parameter can be changed dynamically.
  //
@@ -211,7 +207,7 @@ struct ColumnFamilyOptions {
  // java/C api hard to construct.
  std::vector<CompressionType> compression_per_level;

-  //different options for compression algorithms
+  // different options for compression algorithms
  CompressionOptions compression_opts;

  // If non-nullptr, use the specified filter policy to reduce disk reads.
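A hedged sketch of how compression_per_level and compression_opts are typically populated. The member names come from the hunks above; the particular values and the helper function are illustrative only.

#include "rocksdb/options.h"

rocksdb::Options MakeCompressionTunedOptions() {
  rocksdb::Options options;

  // Leave the small, hot levels uncompressed and compress deeper levels;
  // the per-level selection semantics are described in the header comments.
  options.compression_per_level = {rocksdb::kNoCompression,
                                   rocksdb::kNoCompression,
                                   rocksdb::kSnappyCompression,
                                   rocksdb::kZlibCompression};

  // Algorithm-specific knobs carried by CompressionOptions.
  options.compression_opts.window_bits = -14;
  options.compression_opts.level = 6;
  options.compression_opts.strategy = 0;

  // Restart points every 16 keys within a block (the default shown earlier).
  options.block_restart_interval = 16;
  return options;
}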
@@ -290,7 +286,6 @@ struct ColumnFamilyOptions {
  // will be 20MB, total file size for level-2 will be 200MB,
  // and total file size for level-3 will be 2GB.

-
  // by default 'max_bytes_for_level_base' is 10MB.
  uint64_t max_bytes_for_level_base;
  // by default 'max_bytes_for_level_multiplier' is 10.
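To make the sizing arithmetic concrete, a small sketch; the 20 MB base and multiplier of 10 mirror the worked example in the comment, while the helper function itself is illustrative.

#include "rocksdb/options.h"

void ConfigureLevelSizes(rocksdb::Options* options) {
  // Target size for level L is roughly:
  //   max_bytes_for_level_base * max_bytes_for_level_multiplier^(L-1)
  // With a 20 MB base and a multiplier of 10:
  //   level-1 = 20 MB, level-2 = 200 MB, level-3 = 2 GB.
  options->max_bytes_for_level_base = 20ULL * 1024 * 1024;
  options->max_bytes_for_level_multiplier = 10;
}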
@@ -426,6 +421,17 @@ struct ColumnFamilyOptions {
  // Default: 10000, if inplace_update_support = true, else 0.
  size_t inplace_update_num_locks;

+  // Maximum number of successive merge operations on a key in the memtable.
+  //
+  // When a merge operation is added to the memtable and the maximum number of
+  // successive merges is reached, the value of the key will be calculated and
+  // inserted into the memtable instead of the merge operation. This will
+  // ensure that there are never more than max_successive_merges merge
+  // operations in the memtable.
+  //
+  // Default: 0 (disabled)
+  size_t max_successive_merges;
+
  // Create ColumnFamilyOptions with default values for all fields
  ColumnFamilyOptions();
  // Create ColumnFamilyOptions from Options
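A hedged sketch of using the new field. It assumes a merge operator has already been installed (merge_operator is an existing Options member, but no operator is shown here), and the limit of 5 is arbitrary.

#include "rocksdb/options.h"

void LimitSuccessiveMerges(rocksdb::Options* options) {
  // Only meaningful when options->merge_operator is set. Once five un-merged
  // operands for a key have accumulated in the memtable, the next merge is
  // evaluated immediately and the resulting value is stored instead of a
  // sixth operand.
  options->max_successive_merges = 5;
}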
@@ -560,6 +566,14 @@ struct DBOptions {
  // If <= 0, a proper value is automatically calculated (usually 1/10 of
  // write_buffer_size).
  //
+  // There are two additional restrictions on the specified size:
+  // (1) size should be in the range of [4096, 2 << 30] and
+  // (2) be the multiple of the CPU word (which helps with the memory
+  //     alignment).
+  //
+  // We'll automatically check and adjust the size number to make sure it
+  // conforms to the restrictions.
+  //
  // Default: 0
  size_t arena_block_size;
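A hedged sketch of picking an explicit arena block size that satisfies the restrictions spelled out above; the 64 KB figure is an arbitrary choice.

#include "rocksdb/options.h"

void ConfigureArena(rocksdb::Options* options) {
  // 64 KB lies inside [4096, 2 << 30] and is a multiple of the CPU word size.
  // Leaving arena_block_size at 0 instead lets RocksDB derive roughly
  // write_buffer_size / 10 automatically.
  options->arena_block_size = 64 * 1024;
}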
@@ -614,7 +628,12 @@ struct DBOptions {
  // Specify the file access pattern once a compaction is started.
  // It will be applied to all input files of a compaction.
  // Default: NORMAL
-  enum { NONE, NORMAL, SEQUENTIAL, WILLNEED } access_hint_on_compaction_start;
+  enum {
+    NONE,
+    NORMAL,
+    SEQUENTIAL,
+    WILLNEED
+  } access_hint_on_compaction_start;

  // Use adaptive mutex, which spins in the user space before resorting
  // to kernel. This could reduce context switch when the mutex is not
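A hedged sketch of setting the reformatted enum. Because the enum is declared inline inside DBOptions, its values are addressed through that scope; the choice of WILLNEED is illustrative.

#include "rocksdb/options.h"

void HintCompactionReads(rocksdb::Options* options) {
  // Ask the OS to prefetch compaction input files aggressively.
  // Options inherits from DBOptions, so DBOptions::WILLNEED names the value.
  options->access_hint_on_compaction_start = rocksdb::DBOptions::WILLNEED;
}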
@@ -666,7 +685,7 @@ struct Options : public DBOptions, public ColumnFamilyOptions {
  // the block cache. It will not page in data from the OS cache or data that
  // resides in storage.
  enum ReadTier {
-    kReadAllTier = 0x0,    // data in memtable, block cache, OS cache or storage
+    kReadAllTier = 0x0,    // data in memtable, block cache, OS cache or storage
    kBlockCacheTier = 0x1  // data in memtable or block cache
  };
@@ -719,13 +738,14 @@ struct ReadOptions {
        prefix_seek(false),
        snapshot(nullptr),
        prefix(nullptr),
-        read_tier(kReadAllTier) {
-  }
-  ReadOptions(bool cksum, bool cache) :
-      verify_checksums(cksum), fill_cache(cache),
-      prefix_seek(false), snapshot(nullptr), prefix(nullptr),
-      read_tier(kReadAllTier) {
-  }
+        read_tier(kReadAllTier) {}
+  ReadOptions(bool cksum, bool cache)
+      : verify_checksums(cksum),
+        fill_cache(cache),
+        prefix_seek(false),
+        snapshot(nullptr),
+        prefix(nullptr),
+        read_tier(kReadAllTier) {}
};

// Options that control write operations
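A hedged sketch of a cache-only read built from the ReadOptions fields above; the function name and the interpretation of the returned status are illustrative, with the kBlockCacheTier semantics taken from the ReadTier comments.

#include <string>
#include "rocksdb/db.h"
#include "rocksdb/options.h"

// Returns true only when the value was served from the memtable or block cache.
bool TryCachedGet(rocksdb::DB* db, const rocksdb::Slice& key, std::string* value) {
  rocksdb::ReadOptions read_options;
  read_options.read_tier = rocksdb::kBlockCacheTier;  // never touch OS cache or storage
  rocksdb::Status s = db->Get(read_options, key, value);
  // When the data is not memory-resident, Get() reports an Incomplete status
  // instead of blocking on I/O.
  return s.ok();
}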
@@ -752,10 +772,7 @@ struct WriteOptions {
  // and the write may get lost after a crash.
  bool disableWAL;

-  WriteOptions()
-      : sync(false),
-        disableWAL(false) {
-  }
+  WriteOptions() : sync(false), disableWAL(false) {}
};

// Options that control flush operations
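A hedged sketch of the trade-off the disableWAL comment describes; the key and value are placeholders.

#include "rocksdb/db.h"
#include "rocksdb/options.h"

void FastUnloggedPut(rocksdb::DB* db) {
  rocksdb::WriteOptions write_options;
  write_options.sync = false;
  write_options.disableWAL = true;  // skips the write-ahead log: faster, but the
                                    // write may be lost if the process crashes
  db->Put(write_options, "key", "value");
}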
@@ -764,9 +781,7 @@ struct FlushOptions {
  // Default: true
  bool wait;

-  FlushOptions()
-      : wait(true) {
-  }
+  FlushOptions() : wait(true) {}
};

} // namespace rocksdb
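A hedged sketch of a blocking flush using the single field this struct carries.

#include "rocksdb/db.h"
#include "rocksdb/options.h"

void FlushMemtableBlocking(rocksdb::DB* db) {
  rocksdb::FlushOptions flush_options;
  flush_options.wait = true;  // block until the memtable has been written out
  db->Flush(flush_options);
}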
@@ -31,6 +31,14 @@ struct BackupableDBOptions {
  // Default: nullptr
  Env* backup_env;

+  // If share_table_files == true, backup will assume that table files with
+  // same name have the same contents. This enables incremental backups and
+  // avoids unnecessary data copies.
+  // If share_table_files == false, each backup will be on its own and will
+  // not share any data with other backups.
+  // default: true
+  bool share_table_files;
+
  // Backup info and error messages will be written to info_log
  // if non-nullptr.
  // Default: nullptr
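A hedged sketch of wrapping an open DB for incremental backups with the new share_table_files field; the include path and backup directory are assumptions, while the types come from the header above.

#include "utilities/backupable_db.h"  // include path may differ in a vendored tree

rocksdb::BackupableDB* WrapForBackups(rocksdb::DB* db) {
  rocksdb::BackupableDBOptions backup_options("/var/backups/rocksdb");
  backup_options.share_table_files = true;  // reuse identical table files across
                                            // backups, making them incremental
  // BackupableDB wraps the DB and adds backup calls such as CreateNewBackup().
  return new rocksdb::BackupableDB(db, backup_options);
}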
@@ -49,6 +57,7 @@ struct BackupableDBOptions {

  explicit BackupableDBOptions(const std::string& _backup_dir,
                               Env* _backup_env = nullptr,
+                              bool _share_table_files = true,
                               Logger* _info_log = nullptr,
                               bool _sync = true,
                               bool _destroy_old_data = false) :
@@ -93,6 +102,14 @@ class BackupableDB : public StackableDB {
  Status PurgeOldBackups(uint32_t num_backups_to_keep);
  // deletes a specific backup
  Status DeleteBackup(BackupID backup_id);
+  // Call this from another thread if you want to stop the backup
+  // that is currently happening. It will return immediately, will
+  // not wait for the backup to stop.
+  // The backup will stop ASAP and the call to CreateNewBackup will
+  // return Status::Incomplete(). It will not clean up after itself, but
+  // the state will remain consistent. The state will be cleaned up
+  // next time you create BackupableDB or RestoreBackupableDB.
+  void StopBackup();

 private:
  BackupEngine* backup_engine_;
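A hedged sketch of the two-thread pattern the StopBackup comment describes; std::thread and the helper function are assumptions, and only CreateNewBackup/StopBackup come from the class above.

#include <thread>
#include "utilities/backupable_db.h"  // include path may differ in a vendored tree

void BackupThenCancel(rocksdb::BackupableDB* backup_db) {
  // Worker thread runs the (potentially long) backup.
  std::thread worker([backup_db] {
    rocksdb::Status s = backup_db->CreateNewBackup(true /* flush_before_backup */);
    // After StopBackup(), s.IsIncomplete() is true; the on-disk state stays
    // consistent and is cleaned up by the next BackupableDB or
    // RestoreBackupableDB instance.
    (void)s;
  });

  // Any other thread (here, the caller) may abort the backup at any time.
  backup_db->StopBackup();  // returns immediately, does not wait for the backup
  worker.join();
}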
@@ -108,9 +125,10 @@ class RestoreBackupableDB {
  void GetBackupInfo(std::vector<BackupInfo>* backup_info);

  // restore from backup with backup_id
-  // IMPORTANT -- if you restore from some backup that is not the latest,
-  // and you start creating new backups from the new DB, all the backups
-  // that were newer than the backup you restored from will be deleted
+  // IMPORTANT -- if options_.share_table_files == true and you restore DB
+  // from some backup that is not the latest, and you start creating new
+  // backups from the new DB, all the backups that were newer than the
+  // backup you restored from will be deleted
  //
  // Example: Let's say you have backups 1, 2, 3, 4, 5 and you restore 3.
  // If you try creating a new backup now, old backups 4 and 5 will be deleted
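A hedged sketch of the restore path this comment warns about; the directories and the chosen backup id are illustrative, and the RestoreDBFromBackup(backup_id, db_dir, wal_dir) signature is assumed from the surrounding class.

#include <vector>
#include "rocksdb/env.h"
#include "utilities/backupable_db.h"  // include path may differ in a vendored tree

rocksdb::Status RestoreBackupThree() {
  rocksdb::RestoreBackupableDB restorer(
      rocksdb::Env::Default(), rocksdb::BackupableDBOptions("/var/backups/rocksdb"));

  std::vector<rocksdb::BackupInfo> backups;
  restorer.GetBackupInfo(&backups);  // e.g. backups 1, 2, 3, 4, 5

  // Restoring backup 3 and then creating a new backup deletes backups 4 and 5
  // when share_table_files == true, exactly as described above.
  return restorer.RestoreDBFromBackup(3, "/var/db/rocksdb", "/var/db/rocksdb");
}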