Fix all warnings generated by the -Wall compiler option.

Summary:
The default compilation process now passes "-Wall" to the compiler.
Fix all compilation warnings generated by gcc.

Test Plan: make all check

Reviewers: heyongqiang, emayanke, sheki

Reviewed By: heyongqiang

CC: MarkCallaghan

Differential Revision: https://reviews.facebook.net/D6525
Author: Dhruba Borthakur
Date: 2012-11-06 12:02:18 -08:00
Parent: cb7a00227f
Commit: aa42c66814
35 changed files with 143 additions and 142 deletions
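Most of the changes below silence -Wsign-compare, which -Wall enables for C++: a signed loop index compared against an unsigned quantity such as std::vector::size() or std::string::length(). A minimal, self-contained sketch of the warning and of the fix pattern used throughout this commit (the names here are invented for illustration and do not come from the diff):

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> values;
  values.push_back(1);
  values.push_back(2);
  // Warns under -Wall (-Wsign-compare): values.size() is an unsigned
  // size_t, while i is a signed int.
  //   for (int i = 0; i < values.size(); i++) { ... }
  // Fixed the same way as in the hunks below: make the index unsigned.
  for (unsigned int i = 0; i < values.size(); i++) {
    std::printf("%d\n", values[i]);
  }
  return 0;
}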

@@ -40,7 +40,7 @@ TEST(ArenaTest, Simple) {
       r = arena.Allocate(s);
     }
-    for (int b = 0; b < s; b++) {
+    for (unsigned int b = 0; b < s; b++) {
       // Fill the "i"th allocation with a known bit pattern
       r[b] = i % 256;
     }
@@ -51,12 +51,12 @@ TEST(ArenaTest, Simple) {
       ASSERT_LE(arena.MemoryUsage(), bytes * 1.10);
     }
   }
-  for (int i = 0; i < allocated.size(); i++) {
+  for (unsigned int i = 0; i < allocated.size(); i++) {
     size_t num_bytes = allocated[i].first;
     const char* p = allocated[i].second;
-    for (int b = 0; b < num_bytes; b++) {
+    for (unsigned int b = 0; b < num_bytes; b++) {
       // Check the "i"th allocation for the known bit pattern
-      ASSERT_EQ(int(p[b]) & 0xff, i % 256);
+      ASSERT_EQ(int(p[b]) & 0xff, (int)(i % 256));
     }
   }
 }

@@ -33,8 +33,11 @@ class AutoSplitLogger : public Logger {
  public:
   AutoSplitLogger(Env* env, const std::string& dbname,
                   const std::string& db_log_dir, size_t log_max_size):
-      env_(env), dbname_(dbname), db_log_dir_(db_log_dir),
-      MAX_LOG_FILE_SIZE(log_max_size), status_(Status::OK()) {
+      dbname_(dbname),
+      db_log_dir_(db_log_dir),
+      env_(env),
+      MAX_LOG_FILE_SIZE(log_max_size),
+      status_(Status::OK()) {
     env->GetAbsolutePath(dbname, &db_absolute_path_);
     log_fname_ = InfoLogFileName(dbname_, db_absolute_path_, db_log_dir_);
     InitLogger();
@@ -67,7 +70,7 @@ class AutoSplitLogger : public Logger {
       logger_ = NULL;
     }
     if (logger_->GetLogFileSize() ==
-        Logger::DO_NOT_SUPPORT_GET_LOG_FILE_SIZE) {
+        (size_t)Logger::DO_NOT_SUPPORT_GET_LOG_FILE_SIZE) {
       status_ = Status::NotSupported(
           "The underlying logger doesn't support GetLogFileSize()");
     }
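The constructor change above reorders the member-initializer list to match the order in which the members are declared. -Wall includes -Wreorder, which warns when the list order differs from declaration order, since members are always initialized in declaration order regardless of how the list is written. A small standalone sketch of that warning, using hypothetical member names:

class Example {
  int first_;
  int second_;
 public:
  // Would warn with -Wreorder: second_ is listed before first_, yet
  // first_ is still initialized first because initialization follows
  // declaration order.
  //   Example() : second_(2), first_(1) {}
  // Fixed form: the initializer list follows the declaration order.
  Example() : first_(1), second_(2) {}
};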

@@ -58,7 +58,7 @@ class BloomFilterPolicy : public FilterPolicy {
     dst->resize(init_size + bytes, 0);
     dst->push_back(static_cast<char>(k_));  // Remember # of probes in filter
     char* array = &(*dst)[init_size];
-    for (size_t i = 0; i < n; i++) {
+    for (size_t i = 0; i < (size_t)n; i++) {
       // Use double-hashing to generate a sequence of hash values.
       // See analysis in [Kirsch,Mitzenmacher 2006].
       uint32_t h = hash_func_(keys[i]);

@@ -125,7 +125,7 @@ TEST(BloomTest, VaryingLengths) {
     }
     Build();
-    ASSERT_LE(FilterSize(), (length * 10 / 8) + 40) << length;
+    ASSERT_LE(FilterSize(), (size_t)((length * 10 / 8) + 40)) << length;
     // All added keys must match
     for (int i = 0; i < length; i++) {

@@ -116,7 +116,6 @@ class HandleTable {
       LRUHandle* h = list_[i];
       while (h != NULL) {
         LRUHandle* next = h->next_hash;
-        Slice key = h->key();
         uint32_t hash = h->hash;
         LRUHandle** ptr = &new_list[hash & (new_length - 1)];
         h->next_hash = *ptr;
@@ -268,7 +267,6 @@ void LRUCache::Erase(const Slice& key, uint32_t hash) {
 }
 static int kNumShardBits = 4; // default values, can be overridden
-static int kNumShards = 1 << kNumShardBits;
 class ShardedLRUCache : public Cache {
  private:
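The deleted lines in these two hunks remove values that are assigned but never read, which gcc reports under -Wall as unused variables. A trivial sketch of the pattern (hypothetical code, not taken from cache.cc):

#include <cstdio>

void Demo(int count) {
  // Would warn under -Wall: "unused variable 'doubled'".
  //   int doubled = count * 2;
  std::printf("count = %d\n", count);
}

int main() { Demo(3); return 0; }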

@@ -83,28 +83,28 @@ TEST(CacheTest, HitAndMiss) {
   ASSERT_EQ(201, Lookup(200));
   ASSERT_EQ(-1, Lookup(300));
-  ASSERT_EQ(1, deleted_keys_.size());
+  ASSERT_EQ(1U, deleted_keys_.size());
   ASSERT_EQ(100, deleted_keys_[0]);
   ASSERT_EQ(101, deleted_values_[0]);
 }
 TEST(CacheTest, Erase) {
   Erase(200);
-  ASSERT_EQ(0, deleted_keys_.size());
+  ASSERT_EQ(0U, deleted_keys_.size());
   Insert(100, 101);
   Insert(200, 201);
   Erase(100);
   ASSERT_EQ(-1, Lookup(100));
   ASSERT_EQ(201, Lookup(200));
-  ASSERT_EQ(1, deleted_keys_.size());
+  ASSERT_EQ(1U, deleted_keys_.size());
   ASSERT_EQ(100, deleted_keys_[0]);
   ASSERT_EQ(101, deleted_values_[0]);
   Erase(100);
   ASSERT_EQ(-1, Lookup(100));
   ASSERT_EQ(201, Lookup(200));
-  ASSERT_EQ(1, deleted_keys_.size());
+  ASSERT_EQ(1U, deleted_keys_.size());
 }
 TEST(CacheTest, EntriesArePinned) {
@@ -115,19 +115,19 @@ TEST(CacheTest, EntriesArePinned) {
   Insert(100, 102);
   Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
   ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
-  ASSERT_EQ(0, deleted_keys_.size());
+  ASSERT_EQ(0U, deleted_keys_.size());
   cache_->Release(h1);
-  ASSERT_EQ(1, deleted_keys_.size());
+  ASSERT_EQ(1U, deleted_keys_.size());
   ASSERT_EQ(100, deleted_keys_[0]);
   ASSERT_EQ(101, deleted_values_[0]);
   Erase(100);
   ASSERT_EQ(-1, Lookup(100));
-  ASSERT_EQ(1, deleted_keys_.size());
+  ASSERT_EQ(1U, deleted_keys_.size());
   cache_->Release(h2);
-  ASSERT_EQ(2, deleted_keys_.size());
+  ASSERT_EQ(2U, deleted_keys_.size());
   ASSERT_EQ(100, deleted_keys_[1]);
   ASSERT_EQ(102, deleted_values_[1]);
 }
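The U suffixes added above matter because ASSERT_EQ compares its two arguments directly, so checking deleted_keys_.size(), a size_t, against a plain int literal trips -Wsign-compare; writing the expected value as an unsigned literal keeps both sides unsigned. A minimal sketch of the same idea outside the test harness (hypothetical helper, written only to show the comparison):

#include <vector>

bool HasExactlyOneDeletion(const std::vector<int>& deleted_keys) {
  // "1 == deleted_keys.size()" would compare a signed int with an
  // unsigned size_t and warn under -Wall; the U suffix avoids that.
  return 1U == deleted_keys.size();
}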

@@ -79,7 +79,7 @@ void PutVarint32(std::string* dst, uint32_t v) {
 }
 char* EncodeVarint64(char* dst, uint64_t v) {
-  static const int B = 128;
+  static const unsigned int B = 128;
   unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
   while (v >= B) {
     *(ptr++) = (v & (B-1)) | B;

@@ -55,7 +55,7 @@ TEST(Coding, Fixed64) {
 TEST(Coding, EncodingOutput) {
   std::string dst;
   PutFixed32(&dst, 0x04030201);
-  ASSERT_EQ(4, dst.size());
+  ASSERT_EQ(4U, dst.size());
   ASSERT_EQ(0x01, static_cast<int>(dst[0]));
   ASSERT_EQ(0x02, static_cast<int>(dst[1]));
   ASSERT_EQ(0x03, static_cast<int>(dst[2]));
@@ -63,7 +63,7 @@ TEST(Coding, EncodingOutput) {
   dst.clear();
   PutFixed64(&dst, 0x0807060504030201ull);
-  ASSERT_EQ(8, dst.size());
+  ASSERT_EQ(8U, dst.size());
   ASSERT_EQ(0x01, static_cast<int>(dst[0]));
   ASSERT_EQ(0x02, static_cast<int>(dst[1]));
   ASSERT_EQ(0x03, static_cast<int>(dst[2]));
@@ -112,13 +112,13 @@ TEST(Coding, Varint64) {
   };
   std::string s;
-  for (int i = 0; i < values.size(); i++) {
+  for (unsigned int i = 0; i < values.size(); i++) {
     PutVarint64(&s, values[i]);
   }
   const char* p = s.data();
   const char* limit = p + s.size();
-  for (int i = 0; i < values.size(); i++) {
+  for (unsigned int i = 0; i < values.size(); i++) {
     ASSERT_TRUE(p < limit);
     uint64_t actual;
     const char* start = p;
@@ -143,7 +143,7 @@ TEST(Coding, Varint32Truncation) {
   std::string s;
   PutVarint32(&s, large_value);
   uint32_t result;
-  for (int len = 0; len < s.size() - 1; len++) {
+  for (unsigned int len = 0; len < s.size() - 1; len++) {
     ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == NULL);
   }
   ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != NULL);
@@ -162,7 +162,7 @@ TEST(Coding, Varint64Truncation) {
   std::string s;
   PutVarint64(&s, large_value);
   uint64_t result;
-  for (int len = 0; len < s.size() - 1; len++) {
+  for (unsigned int len = 0; len < s.size() - 1; len++) {
     ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == NULL);
   }
   ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != NULL);

@@ -15,20 +15,20 @@ TEST(CRC, StandardResults) {
   char buf[32];
   memset(buf, 0, sizeof(buf));
-  ASSERT_EQ(0x8a9136aa, Value(buf, sizeof(buf)));
+  ASSERT_EQ(0x8a9136aaU, Value(buf, sizeof(buf)));
   memset(buf, 0xff, sizeof(buf));
-  ASSERT_EQ(0x62a8ab43, Value(buf, sizeof(buf)));
+  ASSERT_EQ(0x62a8ab43U, Value(buf, sizeof(buf)));
   for (int i = 0; i < 32; i++) {
     buf[i] = i;
   }
-  ASSERT_EQ(0x46dd794e, Value(buf, sizeof(buf)));
+  ASSERT_EQ(0x46dd794eU, Value(buf, sizeof(buf)));
   for (int i = 0; i < 32; i++) {
     buf[i] = 31 - i;
   }
-  ASSERT_EQ(0x113fdb5c, Value(buf, sizeof(buf)));
+  ASSERT_EQ(0x113fdb5cU, Value(buf, sizeof(buf)));
   unsigned char data[48] = {
     0x01, 0xc0, 0x00, 0x00,

@@ -61,7 +61,7 @@ TEST(EnvPosixTest, RunMany) {
   Env::Default()->SleepForMicroseconds(kDelayMicros);
   void* cur = last_id.Acquire_Load();
-  ASSERT_EQ(4, reinterpret_cast<uintptr_t>(cur));
+  ASSERT_EQ(4U, reinterpret_cast<uintptr_t>(cur));
 }
 struct State {

@@ -12,7 +12,7 @@ const char* LDBCommand::HEX_ARG = "--hex";
 Compactor::Compactor(std::string& db_name, std::vector<std::string>& args) :
   LDBCommand(db_name, args), null_from_(true), null_to_(true), hex_(false) {
-  for (int i = 0; i < args.size(); i++) {
+  for (unsigned int i = 0; i < args.size(); i++) {
     std::string& arg = args.at(i);
     if (arg.find(FROM_ARG) == 0) {
       null_from_ = false;
@@ -68,10 +68,15 @@ const char* DBDumper::STATS_ARG = "--stats";
 const char* DBDumper::HEX_OUTPUT_ARG = "--output_hex";
 DBDumper::DBDumper(std::string& db_name, std::vector<std::string>& args) :
-  LDBCommand(db_name, args), null_from_(true), null_to_(true), hex_(false),
-  count_only_(false), print_stats_(false), max_keys_(-1),
-  hex_output_(false) {
-  for (int i = 0; i < args.size(); i++) {
+  LDBCommand(db_name, args),
+  null_from_(true),
+  null_to_(true),
+  max_keys_(-1),
+  count_only_(false),
+  print_stats_(false),
+  hex_(false),
+  hex_output_(false) {
+  for (unsigned int i = 0; i < args.size(); i++) {
     std::string& arg = args.at(i);
     if (arg.find(FROM_ARG) == 0) {
       null_from_ = false;
@@ -154,12 +159,12 @@ void DBDumper::DoCommand() {
     if (!count_only_) {
       if (hex_output_) {
         std::string str = iter->key().ToString();
-        for (int i = 0; i < str.length(); ++i) {
+        for (unsigned int i = 0; i < str.length(); ++i) {
           fprintf(stdout, "%X", str[i]);
         }
         fprintf(stdout, " ==> ");
         str = iter->value().ToString();
-        for (int i = 0; i < str.length(); ++i) {
+        for (unsigned int i = 0; i < str.length(); ++i) {
           fprintf(stdout, "%X", str[i]);
         }
         fprintf(stdout, "\n");
@@ -183,7 +188,7 @@ ReduceDBLevels::ReduceDBLevels(std::string& db_name,
   : LDBCommand(db_name, args),
     new_levels_(-1),
     print_old_levels_(false) {
-  for (int i = 0; i < args.size(); i++) {
+  for (unsigned int i = 0; i < args.size(); i++) {
     std::string& arg = args.at(i);
     if (arg.find(NEW_LEVLES_ARG) == 0) {
       new_levels_ = atoi(arg.substr(strlen(NEW_LEVLES_ARG)).c_str());

@@ -143,7 +143,7 @@ public:
 static std::string HexToString(const std::string& str) {
   std::string parsed;
-  for (int i = 0; i < str.length();) {
+  for (unsigned int i = 0; i < str.length();) {
     int c;
     sscanf(str.c_str() + i, "%2X", &c);
     parsed.push_back(c);

@@ -61,7 +61,7 @@ bool ConsumeDecimalNumber(Slice* in, uint64_t* val) {
     char c = (*in)[0];
     if (c >= '0' && c <= '9') {
       ++digits;
-      const int delta = (c - '0');
+      const unsigned int delta = (c - '0');
       static const uint64_t kMaxUint64 = ~static_cast<uint64_t>(0);
       if (v > kMaxUint64/10 ||
           (v == kMaxUint64/10 && delta > kMaxUint64%10)) {

@@ -43,11 +43,11 @@ Options::Options()
       db_stats_log_interval(1800),
       db_log_dir(""),
       disable_seek_compaction(false),
-      delete_obsolete_files_period_micros(0),
-      max_log_file_size(0),
-      rate_limit(0.0),
       no_block_cache(false),
       table_cache_numshardbits(4),
+      max_log_file_size(0),
+      delete_obsolete_files_period_micros(0),
+      rate_limit(0.0),
       CompactionFilter(NULL) {
 }
@@ -71,7 +71,7 @@ Options::Dump(
   Log(log," Options.block_size: %zd", block_size);
   Log(log,"Options.block_restart_interval: %d", block_restart_interval);
   if (compression_per_level != NULL) {
-    for (unsigned int i = 0; i < num_levels; i++){
+    for (int i = 0; i < num_levels; i++){
       Log(log," Options.compression[%d]: %d",
           i, compression_per_level[i]);
     }
@@ -83,7 +83,7 @@ Options::Dump(
   Log(log," Options.num_levels: %d", num_levels);
   Log(log," Options.disableDataSync: %d", disableDataSync);
   Log(log," Options.use_fsync: %d", use_fsync);
-  Log(log," Options.max_log_file_size: %d", max_log_file_size);
+  Log(log," Options.max_log_file_size: %ld", max_log_file_size);
   Log(log," Options.db_stats_log_interval: %d",
       db_stats_log_interval);
   Log(log," Options.compression_opts.window_bits: %d",
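The last hunk above switches %d to %ld because max_log_file_size is not an int (it appears to be a size type in this version of the code), and a mismatched conversion specifier triggers -Wformat, also part of -Wall. For comparison, a minimal sketch using %zu, the specifier the C library defines for size_t, as an alternative to the %ld chosen in the diff:

#include <cstddef>
#include <cstdio>

int main() {
  size_t max_log_file_size = 0;
  // %d would warn under -Wall (-Wformat) because the argument is a size_t;
  // %zu matches size_t exactly.
  std::printf("Options.max_log_file_size: %zu\n", max_log_file_size);
  return 0;
}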

@@ -38,7 +38,7 @@ int RunAllTests() {
   int num = 0;
   if (tests != NULL) {
-    for (int i = 0; i < tests->size(); i++) {
+    for (unsigned int i = 0; i < tests->size(); i++) {
       const Test& t = (*tests)[i];
       if (matcher != NULL) {
         std::string name = t.base;

@@ -40,7 +40,7 @@ extern Slice CompressibleString(Random* rnd, double compressed_fraction,
   // Duplicate the random data until we have filled "len" bytes
   dst->clear();
-  while (dst->size() < len) {
+  while (dst->size() < (unsigned int)len) {
     dst->append(raw_data);
   }
   dst->resize(len);