Commit
Add test cases with check CheckDBOpenedAsCompactedDBWithOnlyLmaxFiles for Get and MultiGet
jowlyzhang committed May 23, 2022
1 parent 9ddb9f4 commit e1ecdfb
Showing 1 changed file with 158 additions and 14 deletions.
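Background for the diff below: when a read-only open is requested with max_open_files set to -1, RocksDB will try to open an eligible, fully compacted database as a Compacted DB, which is what the new check helpers verify. A minimal sketch of that open pattern, assuming a build with ROCKSDB_LITE disabled; the database path and function name are illustrative, not part of this commit:

#include <cassert>
#include <string>

#include "rocksdb/db.h"

void OpenAsCompactedDbSketch() {
  rocksdb::Options options;
  // max_open_files = -1 is what lets the read-only open path hand back a
  // Compacted DB for an eligible database.
  options.max_open_files = -1;
  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::OpenForReadOnly(
      options, "/tmp/compacted_db_example" /* illustrative path */, &db);
  assert(s.ok());
  std::string value;
  // Only read operations such as Get/MultiGet are served in this mode.
  s = db->Get(rocksdb::ReadOptions(), "some_key", &value);
  delete db;
}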
172 changes: 158 additions & 14 deletions db/db_readonly_with_timestamp_test.cc
@@ -17,7 +17,8 @@ class DBReadOnlyTestWithTimestamp : public DBBasicTestWithTimestampBase {
: DBBasicTestWithTimestampBase("db_readonly_test_with_timestamp") {}

protected:
void CheckDBOpenedAsCompactedDB() {
#ifndef ROCKSDB_LITE
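// Verifies that the DB is opened as a Compacted DB whose only table file is
// in L0, making L0 the max (and only non-empty) level.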
void CheckDBOpenedAsCompactedDBWithOneLevel0File() {
VersionSet* const versions = dbfull()->GetVersionSet();
ASSERT_NE(versions, nullptr);

@@ -35,6 +36,33 @@ class DBReadOnlyTestWithTimestamp : public DBBasicTestWithTimestampBase {
// L0 is the max level.
ASSERT_EQ(storage_info->num_non_empty_levels(), 1);
}

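// Verifies that the DB is opened as a Compacted DB after everything has been
// compacted to the bottommost level: L0 and all intermediate levels are
// empty, and only the max level (Lmax) has files.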
void CheckDBOpenedAsCompactedDBWithOnlyLmaxFiles() {
VersionSet* const versions = dbfull()->GetVersionSet();
ASSERT_NE(versions, nullptr);

ColumnFamilyData* const cfd = versions->GetColumnFamilySet()->GetDefault();
ASSERT_NE(cfd, nullptr);

Version* const current = cfd->current();
ASSERT_NE(current, nullptr);

const VersionStorageInfo* const storage_info = current->storage_info();
ASSERT_NE(storage_info, nullptr);

// L0 has no files.
ASSERT_EQ(0, NumTableFilesAtLevel(0));

// All levels other than L0 and the max level have no files.
for (int i = 1; i < storage_info->num_non_empty_levels() - 1; ++i) {
ASSERT_FALSE(storage_info->LevelFilesBrief(i).num_files > 0);
}

// Lmax has files.
int max_level = storage_info->num_non_empty_levels() - 1;
ASSERT_TRUE(storage_info->LevelFilesBrief(max_level).num_files > 0);
}
#endif // !ROCKSDB_LITE
};

#ifndef ROCKSDB_LITE
@@ -443,7 +471,7 @@ TEST_F(DBReadOnlyTestWithTimestamp, CompactedDBGetReadTimestampSizeMismatch) {
// timestamp support.
options.max_open_files = -1;
ASSERT_OK(ReadOnlyReopen(options));
CheckDBOpenedAsCompactedDB();
CheckDBOpenedAsCompactedDBWithOneLevel0File();

ReadOptions read_opts;
std::string different_size_read_timestamp;
@@ -482,7 +510,7 @@ TEST_F(DBReadOnlyTestWithTimestamp,
// timestamp support.
options.max_open_files = -1;
ASSERT_OK(ReadOnlyReopen(options));
CheckDBOpenedAsCompactedDB();
CheckDBOpenedAsCompactedDBWithOneLevel0File();

ReadOptions read_opts;
const std::string read_timestamp = Timestamp(2, 0);
@@ -525,7 +553,7 @@ TEST_F(DBReadOnlyTestWithTimestamp,
// timestamp support.
options.max_open_files = -1;
ASSERT_OK(ReadOnlyReopen(options));
CheckDBOpenedAsCompactedDB();
CheckDBOpenedAsCompactedDBWithOneLevel0File();

ReadOptions read_opts;
for (uint64_t key = 0; key <= kMaxKey; ++key) {
Close();
}

TEST_F(DBReadOnlyTestWithTimestamp, CompactedDBGet) {
TEST_F(DBReadOnlyTestWithTimestamp, CompactedDBGetWithOnlyOneL0File) {
const int kNumKeysPerFile = 1026 * 2;
const uint64_t kMaxKey = 1024;
Options options = CurrentOptions();
@@ -569,12 +597,65 @@ TEST_F(DBReadOnlyTestWithTimestamp, CompactedDBGet) {
// timestamp support.
options.max_open_files = -1;
ASSERT_OK(ReadOnlyReopen(options));
CheckDBOpenedAsCompactedDB();
CheckDBOpenedAsCompactedDBWithOneLevel0File();

for (size_t i = 0; i < read_timestamps.size(); ++i) {
ReadOptions read_opts;
Slice read_ts = read_timestamps[i];
read_opts.timestamp = &read_ts;
int count = 0;
for (uint64_t key = start_keys[i]; key <= kMaxKey; ++key, ++count) {
std::string value_from_get;
std::string timestamp;
ASSERT_OK(db_->Get(read_opts, Key1(key), &value_from_get, &timestamp));
ASSERT_EQ("value" + std::to_string(i), value_from_get);
ASSERT_EQ(write_timestamps[i], timestamp);
}
size_t expected_count = kMaxKey - start_keys[i] + 1;
ASSERT_EQ(expected_count, count);
}
Close();
}

TEST_F(DBReadOnlyTestWithTimestamp, CompactedDBGetWithOnlyLmaxFiles) {
const int kNumKeysPerFile = 128;
const uint64_t kMaxKey = 1024;
Options options = CurrentOptions();
options.env = env_;
options.create_if_missing = true;
options.disable_auto_compactions = true;
const size_t kTimestampSize = Timestamp(0, 0).size();
TestComparator test_cmp(kTimestampSize);
options.comparator = &test_cmp;
options.memtable_factory.reset(
test::NewSpecialSkipListFactory(kNumKeysPerFile));
DestroyAndReopen(options);
const std::vector<uint64_t> start_keys = {1, 0};
const std::vector<std::string> write_timestamps = {Timestamp(1, 0),
Timestamp(3, 0)};
const std::vector<std::string> read_timestamps = {Timestamp(2, 0),
Timestamp(4, 0)};
for (size_t i = 0; i < write_timestamps.size(); ++i) {
WriteOptions write_opts;
for (uint64_t key = start_keys[i]; key <= kMaxKey; ++key) {
Status s = db_->Put(write_opts, Key1(key), write_timestamps[i],
"value" + std::to_string(i));
ASSERT_OK(s);
}
}
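// kNumKeysPerFile = 128 with the special skip-list memtable factory forces a
// memtable switch every 128 entries, so the writes above flush into multiple
// table files; the manual full-range compaction below then moves them all to
// the bottommost level.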
ASSERT_OK(db_->Flush(FlushOptions()));
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
Close();

// Reopen the database in read only mode as a Compacted DB to test its
// timestamp support.
options.max_open_files = -1;
ASSERT_OK(ReadOnlyReopen(options));
CheckDBOpenedAsCompactedDBWithOnlyLmaxFiles();

for (size_t i = 0; i < read_timestamps.size(); ++i) {
ReadOptions read_opts;
Slice read_ts = read_timestamps[i];
std::string read_ts_str = read_ts.ToString();
read_opts.timestamp = &read_ts;
int count = 0;
for (uint64_t key = start_keys[i]; key <= kMaxKey; ++key, ++count) {
@@ -618,7 +699,7 @@ TEST_F(DBReadOnlyTestWithTimestamp,
// timestamp support.
options.max_open_files = -1;
ASSERT_OK(ReadOnlyReopen(options));
CheckDBOpenedAsCompactedDB();
CheckDBOpenedAsCompactedDBWithOneLevel0File();

ReadOptions read_opts;
std::string different_size_read_timestamp;
@@ -666,12 +747,11 @@ TEST_F(DBReadOnlyTestWithTimestamp,
// timestamp support.
options.max_open_files = -1;
ASSERT_OK(ReadOnlyReopen(options));
CheckDBOpenedAsCompactedDB();
CheckDBOpenedAsCompactedDBWithOneLevel0File();

ReadOptions read_opts;
std::string read_timestamp = Timestamp(2, 0);
Slice read_ts = read_timestamp;
std::string read_ts_str = read_ts.ToString();
read_opts.timestamp = &read_ts;
std::vector<std::string> key_strs;
std::vector<Slice> keys;
@@ -719,7 +799,7 @@ TEST_F(DBReadOnlyTestWithTimestamp,
// timestamp support.
options.max_open_files = -1;
ASSERT_OK(ReadOnlyReopen(options));
CheckDBOpenedAsCompactedDB();
CheckDBOpenedAsCompactedDBWithOneLevel0File();

ReadOptions read_opts;
std::vector<std::string> key_strs;
Close();
}

TEST_F(DBReadOnlyTestWithTimestamp, CompactedDBMultiGet) {
TEST_F(DBReadOnlyTestWithTimestamp, CompactedDBMultiGetWithOnlyOneL0File) {
const int kNumKeysPerFile = 1026 * 2;
const uint64_t kMaxKey = 1024;
Options options = CurrentOptions();
@@ -771,12 +851,76 @@ TEST_F(DBReadOnlyTestWithTimestamp, CompactedDBMultiGet) {
// timestamp support.
options.max_open_files = -1;
ASSERT_OK(ReadOnlyReopen(options));
CheckDBOpenedAsCompactedDB();
CheckDBOpenedAsCompactedDBWithOneLevel0File();

for (size_t i = 0; i < write_timestamps.size(); ++i) {
ReadOptions read_opts;
Slice read_ts = read_timestamps[i];
read_opts.timestamp = &read_ts;
std::vector<std::string> key_strs;
std::vector<Slice> keys;
for (uint64_t key = start_keys[i]; key <= kMaxKey; ++key) {
key_strs.push_back(Key1(key));
}
for (const auto& key_str : key_strs) {
keys.emplace_back(key_str);
}
size_t batch_size = kMaxKey - start_keys[i] + 1;
std::vector<std::string> values;
std::vector<std::string> timestamps;
std::vector<Status> status_list =
db_->MultiGet(read_opts, keys, &values, &timestamps);
ASSERT_EQ(batch_size, values.size());
ASSERT_EQ(batch_size, timestamps.size());
for (uint64_t idx = 0; idx < values.size(); ++idx) {
ASSERT_EQ("value" + std::to_string(i), values[idx]);
ASSERT_EQ(write_timestamps[i], timestamps[idx]);
ASSERT_OK(status_list[idx]);
}
}

Close();
}

TEST_F(DBReadOnlyTestWithTimestamp, CompactedDBMultiGetWithOnlyLmaxFiles) {
const int kNumKeysPerFile = 128;
const uint64_t kMaxKey = 1024;
Options options = CurrentOptions();
options.env = env_;
options.create_if_missing = true;
options.disable_auto_compactions = true;
const size_t kTimestampSize = Timestamp(0, 0).size();
TestComparator test_cmp(kTimestampSize);
options.comparator = &test_cmp;
options.memtable_factory.reset(
test::NewSpecialSkipListFactory(kNumKeysPerFile));
DestroyAndReopen(options);
const std::vector<uint64_t> start_keys = {1, 0};
const std::vector<std::string> write_timestamps = {Timestamp(1, 0),
Timestamp(3, 0)};
const std::vector<std::string> read_timestamps = {Timestamp(2, 0),
Timestamp(4, 0)};
for (size_t i = 0; i < write_timestamps.size(); ++i) {
WriteOptions write_opts;
for (uint64_t key = start_keys[i]; key <= kMaxKey; ++key) {
Status s = db_->Put(write_opts, Key1(key), write_timestamps[i],
"value" + std::to_string(i));
ASSERT_OK(s);
}
}
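// As in CompactedDBGetWithOnlyLmaxFiles: the small memtable limit yields
// multiple table files, and the full-range compaction below pushes them all
// to the bottommost level.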
ASSERT_OK(db_->Flush(FlushOptions()));
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
Close();

// Reopen the database in read only mode as a Compacted DB to test its
// timestamp support.
options.max_open_files = -1;
ASSERT_OK(ReadOnlyReopen(options));
CheckDBOpenedAsCompactedDBWithOnlyLmaxFiles();

for (size_t i = 0; i < write_timestamps.size(); ++i) {
ReadOptions read_opts;
Slice read_ts = read_timestamps[i];
std::string read_ts_str = read_ts.ToString();
read_opts.timestamp = &read_ts;
std::vector<std::string> key_strs;
std::vector<Slice> keys;
