rgw: in bucket reshard list, clarify new num shards is tentative

With dynamic bucket index resharding, when the average number of
objects per shard exceeds the configured value, the bucket is
scheduled for resharding. That bucket may receive more new objects
before the resharding actually takes place. As a result, the existing
code recalculates the number of new shards just prior to resharding
rather than wasting a resharding opportunity on a value that is now
too low.

The same holds true for a user-scheduled resharding.
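
To illustrate the scheduling-time versus execution-time distinction, here is
a minimal sketch of the idea in Python, with hypothetical names and a
deliberately simplified formula (the actual RGW calculation lives in the
resharding code and involves more than a plain ceiling division):

import math

def tentative_shard_count(num_objects, max_objs_per_shard):
    # Shard count suggested at the moment the bucket is queued for resharding.
    return max(1, math.ceil(num_objects / max_objs_per_shard))

# Value recorded when the bucket is queued for resharding ...
queued = tentative_shard_count(num_objects=120_000, max_objs_per_shard=100_000)  # 2

# ... but the bucket keeps taking writes, so the value is recomputed from the
# current object count just before the reshard actually runs.
final = tentative_shard_count(num_objects=350_000, max_objs_per_shard=100_000)   # 4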

A user reported confusion that the number reported by `radosgw-admin
reshard list` wasn't the number that the reshard operation ultimately
used. This commit makes it clear in the output that the new number of
shards is "tentative", and test_rgw_reshard.py is updated to reflect
the altered output.
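
For example, a script that consumes the listing now reads the renamed key;
this sketch mirrors the updated test, with the invocation simplified and the
other fields of each entry omitted:

import json
import subprocess

# Assumes radosgw-admin is on PATH and pointed at the right cluster config.
out = subprocess.check_output(['radosgw-admin', 'reshard', 'list'])

for entry in json.loads(out):
    # 'new_num_shards' is now reported as 'tentative_new_num_shards' to signal
    # that the value may still change before the reshard actually runs.
    print(entry['bucket_name'], entry['tentative_new_num_shards'])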

Additionally, this commit modernizes the "reshard list" subcommand and
makes it slightly more efficient.

Signed-off-by: J. Eric Ivancich <ivancich@redhat.com>
J. Eric Ivancich 2021-12-22 14:45:59 -05:00
parent be82f81503
commit aa0071ce8b
3 changed files with 10 additions and 10 deletions


@@ -149,7 +149,7 @@ def main():
     json_op = json.loads(cmd)
     log.debug('bucket name %s', json_op[0]['bucket_name'])
     assert json_op[0]['bucket_name'] == BUCKET_NAME1
-    assert json_op[0]['new_num_shards'] == num_shards_expected
+    assert json_op[0]['tentative_new_num_shards'] == num_shards_expected

     # TESTCASE 'reshard-process','reshard','','process bucket resharding','succeeds'
     log.debug(' test: reshard process')
@@ -174,7 +174,7 @@ def main():
     json_op = json.loads(cmd)
     log.debug('bucket name %s', json_op[0]['bucket_name'])
     assert json_op[0]['bucket_name'] == BUCKET_NAME1
-    assert json_op[0]['new_num_shards'] == num_shards_expected
+    assert json_op[0]['tentative_new_num_shards'] == num_shards_expected

     # TESTCASE 'reshard process ,'reshard','process','reshard non empty bucket','succeeds'
     log.debug(' test: reshard process non empty bucket')


@@ -721,8 +721,7 @@ void cls_rgw_reshard_entry::dump(Formatter *f) const
   encode_json("bucket_id", bucket_id, f);
   encode_json("new_instance_id", new_instance_id, f);
   encode_json("old_num_shards", old_num_shards, f);
-  encode_json("new_num_shards", new_num_shards, f);
+  encode_json("tentative_new_num_shards", new_num_shards, f);
 }

 void cls_rgw_reshard_entry::generate_test_instances(list<cls_rgw_reshard_entry*>& ls)


@@ -7211,7 +7211,6 @@ next:
   }

   if (opt_cmd == OPT::RESHARD_LIST) {
-    list<cls_rgw_reshard_entry> entries;
     int ret;
     int count = 0;
     if (max_entries < 0) {
@@ -7226,19 +7225,20 @@ next:
     formatter->open_array_section("reshard");
     for (int i = 0; i < num_logshards; i++) {
       bool is_truncated = true;
-      string marker;
+      std::string marker;
       do {
-        entries.clear();
+        std::list<cls_rgw_reshard_entry> entries;
         ret = reshard.list(dpp(), i, marker, max_entries - count, entries, &is_truncated);
         if (ret < 0) {
           cerr << "Error listing resharding buckets: " << cpp_strerror(-ret) << std::endl;
           return ret;
         }
-        for (auto iter=entries.begin(); iter != entries.end(); ++iter) {
-          cls_rgw_reshard_entry& entry = *iter;
+        for (const auto& entry : entries) {
           encode_json("entry", entry, formatter.get());
-          entry.get_key(&marker);
         }
+        if (is_truncated) {
+          entries.crbegin()->get_key(&marker); // last entry's key becomes marker
+        }
         count += entries.size();
         formatter->flush(cout);
       } while (is_truncated && count < max_entries);
@@ -7250,6 +7250,7 @@ next:
     formatter->close_section();
     formatter->flush(cout);

     return 0;
   }