mirror of
https://github.com/Xahau/xahaud.git
synced 2025-12-06 17:27:52 +00:00
1fdd726 Hotfix RocksDB 3.5 d67500a Add `make install` to Makefile in 3.5.fb. 4cb631a update HISTORY.md cfd0946 comments about the BlockBasedTableOptions migration in Options REVERT: 25888ae Merge pull request #329 from fyrz/master REVERT: 89833e5 Fixed signed-unsigned comparison warning in db_test.cc REVERT: fcac705 Fixed compile warning on Mac caused by unused variables. REVERT: b3343fd resolution for java build problem introduced by 5ec53f3edf62bec1b690ce12fb21a6c52203f3c8 REVERT: 187b299 ForwardIterator: update prev_key_ only if prefix hasn't changed REVERT: 5ec53f3 make compaction related options changeable REVERT: d122e7b Update INSTALL.md REVERT: 986dad0 Merge pull request #324 from dalgaaf/wip-da-SCA-20140930 REVERT: 8ee75dc db/memtable.cc: remove unused variable merge_result REVERT: 0fd8bbc db/db_impl.cc: reduce scope of prefix_initialized REVERT: 676ff7b compaction_picker.cc: remove check for >=0 for unsigned REVERT: e55aea5 document_db.cc: fix assert REVERT: d517c83 in_table_factory.cc: use correct format specifier REVERT: b140375 ttl/ttl_test.cc: prefer prefix ++operator for non-primitive types REVERT: 43c789c spatialdb/spatial_db.cc: use !empty() instead of 'size() > 0' REVERT: 0de452e document_db.cc: pass const parameter by reference REVERT: 4cc8643 util/ldb_cmd.cc: prefer prefix ++operator for non-primitive types REVERT: af8c2b2 util/signal_test.cc: suppress intentional null pointer deref REVERT: 33580fa db/db_impl.cc: fix object handling, remove double lines REVERT: 873f135 db_ttl_impl.h: pass func parameter by reference REVERT: 8558457 ldb_cmd_execute_result.h: perform init in initialization list REVERT: 063471b table/table_test.cc: pass func parameter by reference REVERT: 93548ce table/cuckoo_table_reader.cc: pass func parameter by ref REVERT: b8b7117 db/version_set.cc: use !empty() instead of 'size() > 0' REVERT: 8ce050b table/bloom_block.*: pass func parameter by reference REVERT: 53910dd db_test.cc: pass parameter by reference REVERT: 68ca534 
corruption_test.cc: pass parameter by reference REVERT: 7506198 cuckoo_table_db_test.cc: add flush after delete REVERT: 1f96330 Print MB per second compaction throughput separately for reads and writes REVERT: ffe3d49 Add an instruction about SSE in INSTALL.md REVERT: ee1f3cc Package generation for Ubuntu and CentOS REVERT: f0f7955 Fixing comile errors on OS X REVERT: 99fb613 remove 2 space linter REVERT: b2d64a4 Fix linters, second try REVERT: 747523d Print per column family metrics in db_bench REVERT: 56ebd40 Fix arc lint (should fix #238) REVERT: 637f891 Merge pull request #321 from eonnen/master REVERT: 827e31c Make test use a compatible type in the size checks. REVERT: fd5d80d CompactedDB: log using the correct info_log REVERT: 2faf49d use GetContext to replace callback function pointer REVERT: 983d2de Add AUTHORS file. Fix #203 REVERT: abd70c5 Merge pull request #316 from fyrz/ReverseBytewiseComparator REVERT: 2dc6f62 handle kDelete type in cuckoo builder REVERT: 8b8011a Changed name of ReverseBytewiseComparator based on review comment REVERT: 389edb6 universal compaction picker: use double for potential overflow REVERT: 5340484 Built-in comparator(s) in RocksJava REVERT: d439451 delay initialization of cuckoo table iterator REVERT: 94997ea reduce memory usage of cuckoo table builder REVERT: c627595 improve memory efficiency of cuckoo reader REVERT: 581442d option to choose module when calculating CuckooTable hash REVERT: fbd2daf CompactedDBImpl::MultiGet() for better CuckooTable performance REVERT: 3c68006 CompactedDBImpl REVERT: f7375f3 Fix double deletes REVERT: 21ddcf6 Remove allow_thread_local REVERT: fb4a492 Merge pull request #311 from ankgup87/master REVERT: 611e286 Merge branch 'master' of https://github.com/facebook/rocksdb REVERT: 0103b44 Merge branch 'master' of ssh://github.com/ankgup87/rocksdb REVERT: 1dfb7bb Add block based table config options REVERT: cdaf44f Enlarge log size cap when printing file summary REVERT: 7cc1ed7 Merge pull request 
#309 from naveenatceg/staticbuild REVERT: ba6d660 Resolving merge conflict REVERT: 51eeaf6 Addressing review comments REVERT: fd7d3fe Addressing review comments (adding a env variable to override temp directory) REVERT: cf7ace8 Addressing review comments REVERT: 0a29ce5 re-enable BlockBasedTable::SetupForCompaction() REVERT: 55af370 Remove TODO for checking index checksums REVERT: 3d74f09 Fix compile REVERT: 53b0039 Fix release compile REVERT: d0de413 WriteBatchWithIndex to allow different Comparators for different column families REVERT: 57a32f1 change target_file_size_base to uint64_t REVERT: 5e6aee4 dont create backup_input if compaction filter v2 is not used REVERT: 49b5f94 Merge pull request #306 from Liuchang0812/fix_cast REVERT: 787cb4d remove cast, replace %llu with % PRIu64 REVERT: a7574d4 Update logging.cc REVERT: 7e0dcb9 Update logging.cc REVERT: 57fa3cc Merge pull request #304 from Liuchang0812/fix-check REVERT: cd44522 Merge pull request #305 from Liuchang0812/fix-logging REVERT: 6a031b6 remove unused variable REVERT: 4436f17 fixed #303: replace %ld with % PRId64 REVERT: 7a1bd05 Merge pull request #302 from ankgup87/master REVERT: 423e52c Merge branch 'master' of https://github.com/facebook/rocksdb REVERT: bfeef94 Add rate limiter REVERT: 32f2532 Print compression_size_percent as a signed int REVERT: 976caca Skip AllocateTest if fallocate() is not supported in the file system REVERT: 3b897cd Enable no-fbcode RocksDB build REVERT: f445947 RocksDB: Format uint64 using PRIu64 in db_impl.cc REVERT: e17bc65 Merge pull request #299 from ankgup87/master REVERT: b93797a Fix build REVERT: adae3ca [Java] Fix JNI link error caused by the removal of options.db_stats_log_interval REVERT: 90b8c07 Fix unit tests errors REVERT: 51af7c3 CuckooTable: add one option to allow identity function for the first hash function REVERT: 0350435 Fixed a signed-unsigned comparison in spatial_db.cc -- issue #293 REVERT: 2fb1fea Fix syncronization issues REVERT: ff76895 Remove some 
unnecessary constructors REVERT: feadb9d fix cuckoo table builder test REVERT: 3c232e1 Fix mac compile REVERT: 54cada9 Run make format on PR #249 REVERT: 27b22f1 Merge pull request #249 from tdfischer/decompression-refactoring REVERT: fb6456b Replace naked calls to operator new and delete (Fixes #222) REVERT: 5600c8f cuckoo table: return estimated size - 1 REVERT: a062e1f SetOptions() for memtable related options REVERT: e4eca6a Options conversion function for convenience REVERT: a7c2094 Merge pull request #292 from saghmrossi/master REVERT: 4d05234 Merge branch 'master' of github.com:saghmrossi/rocksdb REVERT: 60a4aa1 Test use_mmap_reads REVERT: 94e43a1 [Java] Fixed 32-bit overflowing issue when converting jlong to size_t REVERT: f9eaaa6 added include for inttypes.h to fix nonworking printf statements REVERT: f090575 Replaced "built on on earlier work" by "built on earlier work" in README.md REVERT: faad439 Fix #284 REVERT: 49aacd8 Fix make install REVERT: acb9348 [Java] Include WriteBatch into RocksDBSample.java, fix how DbBenchmark.java handles WriteBatch. 
REVERT: 4a27a2f Don't sync manifest when disableDataSync = true REVERT: 9b8480d Merge pull request #287 from yinqiwen/rate-limiter-crash-fix REVERT: 28be16b fix rate limiter crash #286 REVERT: 04ce1b2 Fix #284 REVERT: add22e3 standardize scripts to run RocksDB benchmarks REVERT: dee91c2 WriteThread REVERT: 540a257 Fix WAL synced REVERT: 24f034b Merge pull request #282 from Chilledheart/develop REVERT: 49fe329 Fix build issue under macosx REVERT: ebb5c65 Add make install REVERT: 0352a9f add_wrapped_bloom_test REVERT: 9c0e66c Don't run background jobs (flush, compactions) when bg_error_ is set REVERT: a9639bd Fix valgrind test REVERT: d1f24dc Relax FlushSchedule test REVERT: 3d9e6f7 Push model for flushing memtables REVERT: 059e584 [unit test] CompactRange should fail if we don't have space REVERT: dd641b2 fix RocksDB java build REVERT: 53404d9 add_qps_info_in cache bench REVERT: a52cecb Fix Mac compile REVERT: 092f97e Fix comments and typos REVERT: 6cc1286 Added a few statistics for BackupableDB REVERT: 0a42295 Fix SimpleWriteTimeoutTest REVERT: 06d9862 Always pass MergeContext as pointer, not reference REVERT: d343c3f Improve db recovery REVERT: 6bb7e3e Merger test REVERT: 88841bd Explicitly cast char to signed char in Hash() REVERT: 5231146 MemTableOptions REVERT: 1d284db Addressing review comments REVERT: 55114e7 Some updates for SpatialDB REVERT: 171d4ff remove TailingIterator reference in db_impl.h REVERT: 9b0f7ff rename version_set options_ to db_options_ to avoid confusion REVERT: 2d57828 Check stop level trigger-0 before slowdown level-0 trigger REVERT: 659d2d5 move compaction_filter to immutable_options REVERT: 048560a reduce references to cfd->options() in DBImpl REVERT: 011241b DB::Flush() Do not wait for background threads when there is nothing in mem table REVERT: a2bb7c3 Push- instead of pull-model for managing Write stalls REVERT: 0af157f Implement full filter for block based table. 
REVERT: 9360cc6 Fix valgrind issue REVERT: 02d5bff Merge pull request #277 from wankai/master REVERT: 88a2f44 fix comments REVERT: 7c16e39 Merge pull request #276 from wankai/master REVERT: 8237738 replace hard-coded number with named variable REVERT: db8ca52 Merge pull request #273 from nbougalis/static-analysis REVERT: b7b031f Merge pull request #274 from wankai/master REVERT: 4c2b1f0 Merge remote-tracking branch 'upstream/master' REVERT: a5d2863 typo improvement REVERT: 9f8aa09 Don't leak data returned by opendir REVERT: d1cfb71 Remove unused member(s) REVERT: bfee319 sizeof(int*) where sizeof(int) was intended REVERT: d40c1f7 Add missing break statement REVERT: 2e97c38 Avoid off-by-one error when using readlink REVERT: 40ddc3d add cache bench REVERT: 9f1c80b Drop column family from write thread REVERT: 8de151b Add db_bench with lots of column families to regression tests REVERT: c9e419c rename options_ to db_options_ in DBImpl to avoid confusion REVERT: 5cd0576 Fix compaction bug in Cuckoo Table Builder. Use kvs_.size() instead of num_entries in FileSize() method. 
REVERT: 0fbb3fa fixed memory leak in unit test DBIteratorBoundTest REVERT: adcd253 fix asan check REVERT: 4092b7a Merge pull request #272 from project-zerus/patch-1 REVERT: bb6ae0f fix more compile warnings REVERT: 6d31441 Merge pull request #271 from nbougalis/cleanups REVERT: 0cd0ec4 Plug memory leak during index creation REVERT: 4329d74 Fix swapped variable names to accurately reflect usage REVERT: 45a5e3e Remove path with arena==nullptr from NewInternalIterator REVERT: 5665e5e introduce ImmutableOptions REVERT: e0b99d4 created a new ReadOptions parameter 'iterate_upper_bound' REVERT: 51ea889 Fix travis builds REVERT: a481626 Relax backupable rate limiting test REVERT: f7f973d Merge pull request #269 from huahang/patch-2 REVERT: ef5b384 fix a few compile warnings REVERT: 2fd3806 Merge pull request #263 from wankai/master REVERT: 1785114 delete unused Comparator REVERT: 1b1d961 update HISTORY.md REVERT: 703c3ea comments about the BlockBasedTableOptions migration in Options REVERT: 4b5ad88 Merge pull request #260 from wankai/master REVERT: 19cc588 change to filter_block std::unique_ptr support RAII REVERT: 9b976e3 Merge pull request #259 from wankai/master REVERT: 5d25a46 Merge remote-tracking branch 'upstream/master' REVERT: dff2b1a typo improvement REVERT: 343e98a Reverting import change REVERT: ddb8039 RocksDB static build Make file changes to download and build the dependencies .Load the shared library when RocksDB is initialized git-subtree-dir: src/rocksdb2 git-subtree-split: 1fdd726a8254c13d0c66d8db8130ad17c13d7bcc
447 lines
15 KiB
C++
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
|
|
// This source code is licensed under the BSD-style license found in the
|
|
// LICENSE file in the root directory of this source tree. An additional grant
|
|
// of patent rights can be found in the PATENTS file in the same directory.
|
|
//
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
#include "table/format.h"

#include <inttypes.h>

#include <memory>
#include <string>

#include "port/port.h"
#include "rocksdb/env.h"
#include "table/block.h"
#include "util/coding.h"
#include "util/crc32c.h"
#include "util/perf_context_imp.h"
#include "util/xxhash.h"
|
|
|
|
namespace rocksdb {
|
|
|
|
extern const uint64_t kLegacyBlockBasedTableMagicNumber;
|
|
extern const uint64_t kBlockBasedTableMagicNumber;
|
|
|
|
#ifndef ROCKSDB_LITE
|
|
extern const uint64_t kLegacyPlainTableMagicNumber;
|
|
extern const uint64_t kPlainTableMagicNumber;
|
|
#else
|
|
// ROCKSDB_LITE doesn't have plain table
|
|
const uint64_t kLegacyPlainTableMagicNumber = 0;
|
|
const uint64_t kPlainTableMagicNumber = 0;
|
|
#endif
|
|
// Size of the stack buffer used by the fast block-read path
// (ReadAndDecompressFast); blocks whose payload plus trailer fit under
// this limit avoid a heap allocation for the read buffer.
const uint32_t DefaultStackBufferSize = 5000;
|
|
|
|
// Appends this handle to *dst as two varint64 values: offset, then size.
// ~0 is the "never assigned" sentinel for both fields, so encoding an
// uninitialized handle is a programming error caught in debug builds.
void BlockHandle::EncodeTo(std::string* dst) const {
  // Sanity check that all fields have been set
  assert(offset_ != ~static_cast<uint64_t>(0));
  assert(size_ != ~static_cast<uint64_t>(0));
  PutVarint64(dst, offset_);
  PutVarint64(dst, size_);
}
|
|
|
|
// Consumes two varint64 values (offset, then size) from the front of
// *input. Returns Corruption if either varint is missing or malformed;
// on failure *input may have been partially consumed.
Status BlockHandle::DecodeFrom(Slice* input) {
  // Short-circuit matches the encode order: offset is parsed first, and
  // size is only attempted if offset parsed cleanly.
  if (!GetVarint64(input, &offset_) || !GetVarint64(input, &size_)) {
    return Status::Corruption("bad block handle");
  }
  return Status::OK();
}
|
|
// Sentinel handle (offset 0, size 0) used to represent "no block".
const BlockHandle BlockHandle::kNullBlockHandle(0, 0);
|
|
|
|
// legacy footer format:
//    metaindex handle (varint64 offset, varint64 size)
//    index handle     (varint64 offset, varint64 size)
//    <padding> to make the total size 2 * BlockHandle::kMaxEncodedLength
//    table_magic_number (8 bytes)
// new footer format:
//    checksum (char, 1 byte)
//    metaindex handle (varint64 offset, varint64 size)
//    index handle     (varint64 offset, varint64 size)
//    <padding> to make the total size 2 * BlockHandle::kMaxEncodedLength + 1
//    footer version (4 bytes)
//    table_magic_number (8 bytes)
//
// Appends the footer to *dst in whichever of the two layouts above
// matches version(). Every byte position is fixed, so the resize()
// calls pad the variable-length varint handles out to their maximum
// encoded length before the fixed-size tail is written.
void Footer::EncodeTo(std::string* dst) const {
  if (version() == kLegacyFooter) {
    // has to be default checksum with legacy footer
    assert(checksum_ == kCRC32c);
    const size_t original_size = dst->size();
    metaindex_handle_.EncodeTo(dst);
    index_handle_.EncodeTo(dst);
    dst->resize(original_size + 2 * BlockHandle::kMaxEncodedLength);  // Padding
    // Magic number is stored as two little-endian fixed32 halves,
    // low word first.
    PutFixed32(dst, static_cast<uint32_t>(table_magic_number() & 0xffffffffu));
    PutFixed32(dst, static_cast<uint32_t>(table_magic_number() >> 32));
    assert(dst->size() == original_size + kVersion0EncodedLength);
  } else {
    const size_t original_size = dst->size();
    dst->push_back(static_cast<char>(checksum_));
    metaindex_handle_.EncodeTo(dst);
    index_handle_.EncodeTo(dst);
    // 12 = footer version (4 bytes) + magic number (8 bytes), which are
    // appended below after the padding.
    dst->resize(original_size + kVersion1EncodedLength - 12);  // Padding
    PutFixed32(dst, kFooterVersion);
    PutFixed32(dst, static_cast<uint32_t>(table_magic_number() & 0xffffffffu));
    PutFixed32(dst, static_cast<uint32_t>(table_magic_number() >> 32));
    assert(dst->size() == original_size + kVersion1EncodedLength);
  }
}
|
|
|
|
namespace {
|
|
inline bool IsLegacyFooterFormat(uint64_t magic_number) {
|
|
return magic_number == kLegacyBlockBasedTableMagicNumber ||
|
|
magic_number == kLegacyPlainTableMagicNumber;
|
|
}
|
|
|
|
inline uint64_t UpconvertLegacyFooterFormat(uint64_t magic_number) {
|
|
if (magic_number == kLegacyBlockBasedTableMagicNumber) {
|
|
return kBlockBasedTableMagicNumber;
|
|
}
|
|
if (magic_number == kLegacyPlainTableMagicNumber) {
|
|
return kPlainTableMagicNumber;
|
|
}
|
|
assert(false);
|
|
return 0;
|
|
}
|
|
} // namespace
|
|
|
|
// Constructs a footer whose layout version is derived from the magic
// number: legacy magic numbers select the version-0 (legacy) layout,
// everything else the current layout. Checksum defaults to CRC32c.
Footer::Footer(uint64_t table_magic_number)
    : version_(IsLegacyFooterFormat(table_magic_number) ? kLegacyFooter
                                                        : kFooterVersion),
      checksum_(kCRC32c),
      table_magic_number_(table_magic_number) {}
|
|
|
|
// Parses a footer from *input, which must hold at least kMinEncodedLength
// bytes ending exactly at the end of the file. Handles both the legacy
// (version 0) and current (version 1) layouts, silently upconverting
// legacy magic numbers. On success, *input is advanced to start at the
// decoded footer and end just past the magic number.
//
// Returns InvalidArgument when the magic number does not match an
// already-initialized expectation (or the input is too short for a v1
// footer), and Corruption for a bad version, checksum type, or handle.
Status Footer::DecodeFrom(Slice* input) {
  assert(input != nullptr);
  assert(input->size() >= kMinEncodedLength);

  // The 8-byte magic number is always the last thing in the file, stored
  // as two little-endian fixed32 halves, low word first.
  const char* magic_ptr =
      input->data() + input->size() - kMagicNumberLengthByte;
  const uint32_t magic_lo = DecodeFixed32(magic_ptr);
  const uint32_t magic_hi = DecodeFixed32(magic_ptr + 4);
  uint64_t magic = ((static_cast<uint64_t>(magic_hi) << 32) |
                    (static_cast<uint64_t>(magic_lo)));

  // We check for legacy formats here and silently upconvert them
  bool legacy = IsLegacyFooterFormat(magic);
  if (legacy) {
    magic = UpconvertLegacyFooterFormat(magic);
  }
  if (HasInitializedTableMagicNumber()) {
    if (magic != table_magic_number()) {
      char buffer[80];
      // Format with PRIx64: magic is 64 bits wide, and the previous
      // "%lx" with a (long) cast truncated it on 32-bit-long platforms.
      // snprintf NUL-terminates within the given size, so pass the full
      // sizeof(buffer) rather than sizeof(buffer) - 1.
      snprintf(buffer, sizeof(buffer),
               "not an sstable (bad magic number --- %" PRIx64 ")",
               magic);
      return Status::InvalidArgument(buffer);
    }
  } else {
    set_table_magic_number(magic);
  }

  if (legacy) {
    // The size is already asserted to be at least kMinEncodedLength
    // at the beginning of the function
    input->remove_prefix(input->size() - kVersion0EncodedLength);
    version_ = kLegacyFooter;
    checksum_ = kCRC32c;
  } else {
    // In the v1 layout the 4 bytes immediately before the magic number
    // hold the footer version.
    version_ = DecodeFixed32(magic_ptr - 4);
    if (version_ != kFooterVersion) {
      return Status::Corruption("bad footer version");
    }
    // Footer version 1 will always occupy exactly this many bytes.
    // It consists of the checksum type, two block handles, padding,
    // a version number, and a magic number
    if (input->size() < kVersion1EncodedLength) {
      return Status::InvalidArgument("input is too short to be an sstable");
    } else {
      input->remove_prefix(input->size() - kVersion1EncodedLength);
    }
    uint32_t checksum;
    if (!GetVarint32(input, &checksum)) {
      return Status::Corruption("bad checksum type");
    }
    checksum_ = static_cast<ChecksumType>(checksum);
  }

  Status result = metaindex_handle_.DecodeFrom(input);
  if (result.ok()) {
    result = index_handle_.DecodeFrom(input);
  }
  if (result.ok()) {
    // We skip over any leftover data (just padding for now) in "input"
    const char* end = magic_ptr + kMagicNumberLengthByte;
    *input = Slice(end, input->data() + input->size() - end);
  }
  return result;
}
|
|
|
|
// Reads and decodes the footer from the tail of *file. file_size is the
// caller-supplied total size of the file. Returns InvalidArgument when
// the file (or the bytes actually read) is too small to contain a footer.
Status ReadFooterFromFile(RandomAccessFile* file,
                          uint64_t file_size,
                          Footer* footer) {
  if (file_size < Footer::kMinEncodedLength) {
    return Status::InvalidArgument("file is too short to be an sstable");
  }

  // Read the last kMaxEncodedLength bytes, or the whole file if it is
  // shorter than that.
  size_t read_offset = 0;
  if (file_size > Footer::kMaxEncodedLength) {
    read_offset = file_size - Footer::kMaxEncodedLength;
  }
  char footer_space[Footer::kMaxEncodedLength];
  Slice footer_input;
  Status s = file->Read(read_offset, Footer::kMaxEncodedLength, &footer_input,
                        footer_space);
  if (!s.ok()) {
    return s;
  }

  // A short read means file_size was wrong; reject rather than try to
  // decode garbage.
  if (footer_input.size() < Footer::kMinEncodedLength) {
    return Status::InvalidArgument("file is too short to be an sstable");
  }

  return footer->DecodeFrom(&footer_input);
}
|
|
|
|
// Read a block and check its CRC
// contents is the result of reading.
// According to the implementation of file->Read, contents may not point to buf
Status ReadBlock(RandomAccessFile* file, const Footer& footer,
                 const ReadOptions& options, const BlockHandle& handle,
                 Slice* contents, /* result of reading */ char* buf) {
  size_t n = static_cast<size_t>(handle.size());
  Status s;

  {
    PERF_TIMER_GUARD(block_read_time);
    // Read the payload plus the trailer (1-byte compression type followed
    // by a 4-byte checksum).
    s = file->Read(handle.offset(), n + kBlockTrailerSize, contents, buf);
  }

  // Counters are bumped even when the read failed, so they reflect
  // attempted I/O.
  PERF_COUNTER_ADD(block_read_count, 1);
  PERF_COUNTER_ADD(block_read_byte, n + kBlockTrailerSize);

  if (!s.ok()) {
    return s;
  }
  if (contents->size() != n + kBlockTrailerSize) {
    return Status::Corruption("truncated block read");
  }

  // Check the crc of the type and the block contents
  const char* data = contents->data();  // Pointer to where Read put the data
  if (options.verify_checksums) {
    PERF_TIMER_GUARD(block_checksum_time);
    // The stored checksum covers the payload plus the compression-type
    // byte (n + 1 bytes) and sits immediately after that byte.
    uint32_t value = DecodeFixed32(data + n + 1);
    uint32_t actual = 0;
    switch (footer.checksum()) {
      case kCRC32c:
        value = crc32c::Unmask(value);
        actual = crc32c::Value(data, n + 1);
        break;
      case kxxHash:
        actual = XXH32(data, n + 1, 0);
        break;
      default:
        s = Status::Corruption("unknown checksum type");
    }
    if (s.ok() && actual != value) {
      s = Status::Corruption("block checksum mismatch");
    }
    if (!s.ok()) {
      return s;
    }
  }
  return s;
}
|
|
|
|
// Decompress a block according to params
// May need to malloc a space for cache usage
//
// `contents`/`buf` come from a prior ReadBlock() call: `buf` is the
// caller-supplied read buffer and `contents` is where the file
// implementation actually placed the data (possibly elsewhere, e.g. an
// mmap region). `block_size` is the payload size excluding the trailer.
// `use_stack_buf` tells us whether `buf` lives on the caller's stack and
// therefore cannot be handed off to *result.
// Ownership: when result->heap_allocated is set, the caller must free
// result->data (possibly `buf` itself when use_stack_buf is false).
Status DecompressBlock(BlockContents* result, size_t block_size,
                       bool do_uncompress, const char* buf,
                       const Slice& contents, bool use_stack_buf) {
  Status s;
  size_t n = block_size;
  const char* data = contents.data();

  result->data = Slice();
  result->cachable = false;
  result->heap_allocated = false;

  PERF_TIMER_GUARD(block_decompress_time);
  // The byte immediately after the payload records the compression type.
  rocksdb::CompressionType compression_type =
      static_cast<rocksdb::CompressionType>(data[n]);
  // If the caller has requested that the block not be uncompressed
  if (!do_uncompress || compression_type == kNoCompression) {
    if (data != buf) {
      // File implementation gave us pointer to some other data.
      // Use it directly under the assumption that it will be live
      // while the file is open.
      result->data = Slice(data, n);
      result->heap_allocated = false;
      result->cachable = false;  // Do not double-cache
    } else {
      if (use_stack_buf) {
        // Need to allocate space in heap for cache usage
        char* new_buf = new char[n];
        memcpy(new_buf, buf, n);
        result->data = Slice(new_buf, n);
      } else {
        // Hand the caller's heap buffer directly to the result; the
        // caller detects this aliasing and skips freeing it.
        result->data = Slice(buf, n);
      }

      result->heap_allocated = true;
      result->cachable = true;
    }
    result->compression_type = compression_type;
    s = Status::OK();
  } else {
    // Compressed block: decode into a fresh heap buffer.
    s = UncompressBlockContents(data, n, result);
  }
  return s;
}
|
|
|
|
// Read and Decompress block
|
|
// Use buf in stack as temp reading buffer
|
|
Status ReadAndDecompressFast(RandomAccessFile* file, const Footer& footer,
|
|
const ReadOptions& options,
|
|
const BlockHandle& handle, BlockContents* result,
|
|
Env* env, bool do_uncompress) {
|
|
Status s;
|
|
Slice contents;
|
|
size_t n = static_cast<size_t>(handle.size());
|
|
char buf[DefaultStackBufferSize];
|
|
|
|
s = ReadBlock(file, footer, options, handle, &contents, buf);
|
|
if (!s.ok()) {
|
|
return s;
|
|
}
|
|
s = DecompressBlock(result, n, do_uncompress, buf, contents, true);
|
|
if (!s.ok()) {
|
|
return s;
|
|
}
|
|
return s;
|
|
}
|
|
|
|
// Read and Decompress block
|
|
// Use buf in heap as temp reading buffer
|
|
Status ReadAndDecompress(RandomAccessFile* file, const Footer& footer,
|
|
const ReadOptions& options, const BlockHandle& handle,
|
|
BlockContents* result, Env* env, bool do_uncompress) {
|
|
Status s;
|
|
Slice contents;
|
|
size_t n = static_cast<size_t>(handle.size());
|
|
char* buf = new char[n + kBlockTrailerSize];
|
|
|
|
s = ReadBlock(file, footer, options, handle, &contents, buf);
|
|
if (!s.ok()) {
|
|
delete[] buf;
|
|
return s;
|
|
}
|
|
s = DecompressBlock(result, n, do_uncompress, buf, contents, false);
|
|
if (!s.ok()) {
|
|
delete[] buf;
|
|
return s;
|
|
}
|
|
|
|
if (result->data.data() != buf) {
|
|
delete[] buf;
|
|
}
|
|
return s;
|
|
}
|
|
|
|
// Reads (and optionally decompresses) the block identified by `handle`
// into *result, dispatching to the stack-buffer fast path when the block
// is small enough and will be decompressed anyway.
Status ReadBlockContents(RandomAccessFile* file, const Footer& footer,
                         const ReadOptions& options, const BlockHandle& handle,
                         BlockContents* result, Env* env, bool do_uncompress) {
  const size_t block_size = static_cast<size_t>(handle.size());
  const bool fits_on_stack =
      do_uncompress &&
      block_size + kBlockTrailerSize < DefaultStackBufferSize;
  if (fits_on_stack) {
    return ReadAndDecompressFast(file, footer, options, handle, result, env,
                                 do_uncompress);
  }
  return ReadAndDecompress(file, footer, options, handle, result, env,
                           do_uncompress);
}
|
|
|
|
//
// The 'data' points to the raw block contents that was read in from file.
// This method allocates a new heap buffer and the raw block
// contents are uncompresed into this buffer. This
// buffer is returned via 'result' and it is upto the caller to
// free this buffer.
//
// `data[n]` (the byte just past the payload) selects the codec. Returns
// Corruption when the codec is unsupported in this build, the data is
// corrupt, or the type byte is unrecognized.
Status UncompressBlockContents(const char* data, size_t n,
                               BlockContents* result) {
  char* ubuf = nullptr;
  int decompress_size = 0;
  // Uncompressed blocks are expected to be routed elsewhere (see
  // DecompressBlock), never passed here.
  assert(data[n] != kNoCompression);
  switch (data[n]) {
    case kSnappyCompression: {
      size_t ulength = 0;
      static char snappy_corrupt_msg[] =
          "Snappy not supported or corrupted Snappy compressed block contents";
      if (!port::Snappy_GetUncompressedLength(data, n, &ulength)) {
        return Status::Corruption(snappy_corrupt_msg);
      }
      ubuf = new char[ulength];
      if (!port::Snappy_Uncompress(data, n, ubuf)) {
        delete[] ubuf;
        return Status::Corruption(snappy_corrupt_msg);
      }
      result->data = Slice(ubuf, ulength);
      result->heap_allocated = true;
      result->cachable = true;
      break;
    }
    case kZlibCompression:
      // port::*_Uncompress return nullptr on failure; on success they
      // allocate the output buffer, which *result takes ownership of.
      ubuf = port::Zlib_Uncompress(data, n, &decompress_size);
      static char zlib_corrupt_msg[] =
          "Zlib not supported or corrupted Zlib compressed block contents";
      if (!ubuf) {
        return Status::Corruption(zlib_corrupt_msg);
      }
      result->data = Slice(ubuf, decompress_size);
      result->heap_allocated = true;
      result->cachable = true;
      break;
    case kBZip2Compression:
      ubuf = port::BZip2_Uncompress(data, n, &decompress_size);
      static char bzip2_corrupt_msg[] =
          "Bzip2 not supported or corrupted Bzip2 compressed block contents";
      if (!ubuf) {
        return Status::Corruption(bzip2_corrupt_msg);
      }
      result->data = Slice(ubuf, decompress_size);
      result->heap_allocated = true;
      result->cachable = true;
      break;
    case kLZ4Compression:
      ubuf = port::LZ4_Uncompress(data, n, &decompress_size);
      static char lz4_corrupt_msg[] =
          "LZ4 not supported or corrupted LZ4 compressed block contents";
      if (!ubuf) {
        return Status::Corruption(lz4_corrupt_msg);
      }
      result->data = Slice(ubuf, decompress_size);
      result->heap_allocated = true;
      result->cachable = true;
      break;
    case kLZ4HCCompression:
      // LZ4HC-compressed data decompresses with the regular LZ4 routine;
      // HC only changes the compressor side.
      ubuf = port::LZ4_Uncompress(data, n, &decompress_size);
      static char lz4hc_corrupt_msg[] =
          "LZ4HC not supported or corrupted LZ4HC compressed block contents";
      if (!ubuf) {
        return Status::Corruption(lz4hc_corrupt_msg);
      }
      result->data = Slice(ubuf, decompress_size);
      result->heap_allocated = true;
      result->cachable = true;
      break;
    default:
      return Status::Corruption("bad block type");
  }
  result->compression_type = kNoCompression;  // not compressed any more
  return Status::OK();
}
|
|
|
|
} // namespace rocksdb
|