mirror of https://github.com/ceph/ceph (synced 2025-01-19 01:21:49 +00:00)
Merge pull request #12670 from liewegas/wip-nits
mon: fix a few nits

Reviewed-by: Kefu Chai <kchai@redhat.com>
commit 13c01a0350
@@ -626,7 +626,6 @@ function test_mon_misc()
   grep GLOBAL $TMPFILE
   grep -v DIRTY $TMPFILE
   ceph df detail > $TMPFILE
-  grep CATEGORY $TMPFILE
   grep DIRTY $TMPFILE
   ceph df --format json > $TMPFILE
   grep 'total_bytes' $TMPFILE
@@ -1442,7 +1441,7 @@ function test_mon_osd_pool_set()
   wait_for_clean
   ceph osd pool get $TEST_POOL_GETSET all
 
-  for s in pg_num pgp_num size min_size crash_replay_interval crush_ruleset; do
+  for s in pg_num pgp_num size min_size crash_replay_interval crush_rule crush_ruleset; do
     ceph osd pool get $TEST_POOL_GETSET $s
   done
 
@@ -1546,6 +1545,7 @@ function test_mon_osd_pool_set()
   ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
 
   ceph osd pool get rbd crush_ruleset | grep 'crush_ruleset: 0'
+  ceph osd pool get rbd crush_rule | grep 'crush_rule: '
 }
 
 function test_mon_osd_tiered_pool_set()
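For reference, a rough sketch of what the two greps above expect to see: the crush_ruleset value ("0") comes from the test itself, while the rule name printed by the new crush_rule key is only an assumption and depends on the cluster's CRUSH map.

    ceph osd pool get rbd crush_ruleset   # deprecated numeric form, e.g. "crush_ruleset: 0"
    ceph osd pool get rbd crush_rule      # new form: prints the rule by name when the id resolves,
                                          # e.g. "crush_rule: replicated_ruleset" (name assumed)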
@@ -1884,8 +1884,8 @@ function test_mon_cephdf_commands()
     sleep 1
   done
 
-  cal_raw_used_size=`ceph df detail | grep cephdf_for_test | awk -F ' ' '{printf "%d\n", 2 * $4}'`
-  raw_used_size=`ceph df detail | grep cephdf_for_test | awk -F ' ' '{print $11}'`
+  cal_raw_used_size=`ceph df detail | grep cephdf_for_test | awk -F ' ' '{printf "%d\n", 2 * $3}'`
+  raw_used_size=`ceph df detail | grep cephdf_for_test | awk -F ' ' '{print $10}'`
 
   ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
   rm ./cephdf_for_test
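The awk field renumbering here (and in the pool-quota test further down) follows from dropping the CATEGORY column from ceph df detail in PGMap::dump_pool_stats below: every per-pool field after ID shifts left by one, so each hard-coded $N drops by one. A minimal sanity check, assuming a pool named cephdf_for_test, before trusting any particular index:

    # print the field count and the raw row so the $N indices can be checked
    # against whatever columns this build's "ceph df detail" actually emits
    ceph df detail | grep -w cephdf_for_test | awk '{print NF; print $0}'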
@@ -130,9 +130,9 @@ if __name__ == '__main__':
     assert('epoch' in r.myjson['output'])
 
     assert('GLOBAL' in expect('df', 'GET', 200, 'plain').text)
-    assert('CATEGORY' in expect('df?detail=detail', 'GET', 200, 'plain').text)
+    assert('DIRTY' in expect('df?detail=detail', 'GET', 200, 'plain').text)
     # test param with no value (treated as param=param)
-    assert('CATEGORY' in expect('df?detail', 'GET', 200, 'plain').text)
+    assert('DIRTY' in expect('df?detail', 'GET', 200, 'plain').text)
 
     r = expect('df', 'GET', 200, 'json', JSONHDR)
     assert('total_used_bytes' in r.myjson['output']['stats'])
@@ -1758,7 +1758,8 @@ int MDSMonitor::management_command(
     // Persist the new FSMap
     pending_fsmap.filesystems[new_fs->fscid] = new_fs;
     return 0;
-  } else if (prefix == "fs set_default") {
+  } else if (prefix == "fs set_default" ||
+             prefix == "fs set-default") {
     string fs_name;
     cmd_getval(g_ceph_context, cmdmap, "fs_name", fs_name);
     auto fs = pending_fsmap.get_filesystem(fs_name);
@@ -413,7 +413,11 @@ COMMAND("fs add_data_pool name=fs_name,type=CephString " \
 COMMAND("fs rm_data_pool name=fs_name,type=CephString " \
         "name=pool,type=CephString", \
         "remove data pool <pool>", "mds", "rw", "cli,rest")
-COMMAND("fs set_default name=fs_name,type=CephString", \
+COMMAND_WITH_FLAG("fs set_default name=fs_name,type=CephString", \
         "set the default to the named filesystem", \
-        "fs", "rw", "cli,rest")
+        "fs", "rw", "cli,rest", \
+        FLAG(DEPRECATED))
+COMMAND("fs set-default name=fs_name,type=CephString", \
+        "set the default to the named filesystem", \
+        "fs", "rw", "cli,rest")
 
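After this change both spellings reach the same handler in MDSMonitor::management_command; the underscore form is only marked deprecated, not removed. A quick usage sketch, assuming a filesystem named cephfs already exists:

    ceph fs set-default cephfs   # preferred dash spelling
    ceph fs set_default cephfs   # legacy spelling, still accepted but FLAG(DEPRECATED) in MonCommands.h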
@@ -735,11 +739,11 @@ COMMAND("osd pool rename " \
         "rename <srcpool> to <destpool>", "osd", "rw", "cli,rest")
 COMMAND("osd pool get " \
         "name=pool,type=CephPoolname " \
-        "name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_ruleset|hashpspool|nodelete|nopgchange|nosizechange|write_fadvise_dontneed|noscrub|nodeep-scrub|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|auid|target_max_objects|target_max_bytes|cache_target_dirty_ratio|cache_target_dirty_high_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|erasure_code_profile|min_read_recency_for_promote|all|min_write_recency_for_promote|fast_read|hit_set_grade_decay_rate|hit_set_search_last_n|scrub_min_interval|scrub_max_interval|deep_scrub_interval|recovery_priority|recovery_op_priority|scrub_priority|compression_mode|compression_algorithm|compression_required_ratio|compression_max_blob_size|compression_min_blob_size|csum_type|csum_min_block|csum_max_block", \
+        "name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_rule|crush_ruleset|hashpspool|nodelete|nopgchange|nosizechange|write_fadvise_dontneed|noscrub|nodeep-scrub|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|auid|target_max_objects|target_max_bytes|cache_target_dirty_ratio|cache_target_dirty_high_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|erasure_code_profile|min_read_recency_for_promote|all|min_write_recency_for_promote|fast_read|hit_set_grade_decay_rate|hit_set_search_last_n|scrub_min_interval|scrub_max_interval|deep_scrub_interval|recovery_priority|recovery_op_priority|scrub_priority|compression_mode|compression_algorithm|compression_required_ratio|compression_max_blob_size|compression_min_blob_size|csum_type|csum_min_block|csum_max_block", \
         "get pool parameter <var>", "osd", "r", "cli,rest")
 COMMAND("osd pool set " \
         "name=pool,type=CephPoolname " \
-        "name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_ruleset|hashpspool|nodelete|nopgchange|nosizechange|write_fadvise_dontneed|noscrub|nodeep-scrub|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|use_gmt_hitset|debug_fake_ec_pool|target_max_bytes|target_max_objects|cache_target_dirty_ratio|cache_target_dirty_high_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|auid|min_read_recency_for_promote|min_write_recency_for_promote|fast_read|hit_set_grade_decay_rate|hit_set_search_last_n|scrub_min_interval|scrub_max_interval|deep_scrub_interval|recovery_priority|recovery_op_priority|scrub_priority|compression_mode|compression_algorithm|compression_required_ratio|compression_max_blob_size|compression_min_blob_size|csum_type|csum_min_block|csum_max_block|debug_white_box_testing_ec_overwrites " \
+        "name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_rule|crush_ruleset|hashpspool|nodelete|nopgchange|nosizechange|write_fadvise_dontneed|noscrub|nodeep-scrub|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|use_gmt_hitset|debug_fake_ec_pool|target_max_bytes|target_max_objects|cache_target_dirty_ratio|cache_target_dirty_high_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|auid|min_read_recency_for_promote|min_write_recency_for_promote|fast_read|hit_set_grade_decay_rate|hit_set_search_last_n|scrub_min_interval|scrub_max_interval|deep_scrub_interval|recovery_priority|recovery_op_priority|scrub_priority|compression_mode|compression_algorithm|compression_required_ratio|compression_max_blob_size|compression_min_blob_size|csum_type|csum_min_block|csum_max_block|debug_white_box_testing_ec_overwrites " \
         "name=val,type=CephString " \
         "name=force,type=CephChoices,strings=--yes-i-really-mean-it,req=false", \
         "set pool parameter <var> to <val>", "osd", "rw", "cli,rest")
@@ -3167,7 +3167,7 @@ void OSDMonitor::dump_info(Formatter *f)
 namespace {
   enum osd_pool_get_choices {
     SIZE, MIN_SIZE, CRASH_REPLAY_INTERVAL,
-    PG_NUM, PGP_NUM, CRUSH_RULESET, HASHPSPOOL,
+    PG_NUM, PGP_NUM, CRUSH_RULE, CRUSH_RULESET, HASHPSPOOL,
     NODELETE, NOPGCHANGE, NOSIZECHANGE,
     WRITE_FADVISE_DONTNEED, NOSCRUB, NODEEP_SCRUB,
     HIT_SET_TYPE, HIT_SET_PERIOD, HIT_SET_COUNT, HIT_SET_FPP,
@@ -3652,7 +3652,9 @@ bool OSDMonitor::preprocess_command(MonOpRequestRef op)
       {"size", SIZE},
       {"min_size", MIN_SIZE},
       {"crash_replay_interval", CRASH_REPLAY_INTERVAL},
-      {"pg_num", PG_NUM}, {"pgp_num", PGP_NUM}, {"crush_ruleset", CRUSH_RULESET},
+      {"pg_num", PG_NUM}, {"pgp_num", PGP_NUM},
+      {"crush_rule", CRUSH_RULE},
+      {"crush_ruleset", CRUSH_RULESET},
       {"hashpspool", HASHPSPOOL}, {"nodelete", NODELETE},
       {"nopgchange", NOPGCHANGE}, {"nosizechange", NOSIZECHANGE},
       {"noscrub", NOSCRUB}, {"nodeep-scrub", NODEEP_SCRUB},
@@ -3770,6 +3772,14 @@ bool OSDMonitor::preprocess_command(MonOpRequestRef op)
       f->dump_int("crash_replay_interval",
                   p->get_crash_replay_interval());
       break;
+    case CRUSH_RULE:
+      if (osdmap.crush->rule_exists(p->get_crush_ruleset())) {
+        f->dump_string("crush_rule", osdmap.crush->get_rule_name(
+                         p->get_crush_ruleset()));
+      } else {
+        f->dump_string("crush_rule", stringify(p->get_crush_ruleset()));
+      }
+      break;
     case CRUSH_RULESET:
       f->dump_int("crush_ruleset", p->get_crush_ruleset());
       break;
@@ -3926,6 +3936,14 @@ bool OSDMonitor::preprocess_command(MonOpRequestRef op)
       ss << "crash_replay_interval: " <<
            p->get_crash_replay_interval() << "\n";
       break;
+    case CRUSH_RULE:
+      if (osdmap.crush->rule_exists(p->get_crush_ruleset())) {
+        ss << "crush_rule: " << osdmap.crush->get_rule_name(
+             p->get_crush_ruleset()) << "\n";
+      } else {
+        ss << "crush_rule: " << p->get_crush_ruleset() << "\n";
+      }
+      break;
     case CRUSH_RULESET:
       ss << "crush_ruleset: " << p->get_crush_ruleset() << "\n";
       break;
@@ -5317,6 +5335,20 @@ int OSDMonitor::prepare_command_pool_set(map<string,cmd_vartype> &cmdmap,
       }
     }
     p.set_pgp_num(n);
+  } else if (var == "crush_rule") {
+    int id = osdmap.crush->get_rule_id(val);
+    if (id == -ENOENT) {
+      ss << "crush rule " << val << " does not exist";
+      return -ENOENT;
+    }
+    if (id < 0) {
+      ss << cpp_strerror(id);
+      return -ENOENT;
+    }
+    if (!osdmap.crush->check_crush_rule(id, p.get_type(), p.get_size(), ss)) {
+      return -EINVAL;
+    }
+    p.crush_ruleset = id;
   } else if (var == "crush_ruleset") {
     if (interr.length()) {
       ss << "error parsing integer value '" << val << "': " << interr;
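The new crush_rule branch above takes a rule name, resolves it with get_rule_id(), validates it with check_crush_rule() against the pool's type and size, and stores the resolved id in the pool's existing crush_ruleset field. A hedged usage sketch (the pool and rule names below are hypothetical):

    ceph osd pool set mypool crush_rule replicated_ruleset    # rule name resolved to its id
    ceph osd pool set mypool crush_rule no_such_rule \
      || echo "rejected"    # ENOENT path above: "crush rule no_such_rule does not exist"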
@@ -1641,7 +1641,7 @@ void PGMap::print_summary(Formatter *f, ostream *out) const
 
   overall_cache_io_rate_summary(f, &ssr);
   if (!f && ssr.str().length())
-    *out << " cache io " << ssr.str() << "\n";
+    *out << " cache io " << ssr.str() << "\n";
 }
 
 void PGMap::print_oneline_summary(Formatter *f, ostream *out) const
@@ -1883,7 +1883,6 @@ void PGMap::dump_pool_stats(const OSDMap &osd_map, stringstream *ss,
   tbl.define_column("NAME", TextTable::LEFT, TextTable::LEFT);
   tbl.define_column("ID", TextTable::LEFT, TextTable::LEFT);
   if (verbose) {
-    tbl.define_column("CATEGORY", TextTable::LEFT, TextTable::LEFT);
     tbl.define_column("QUOTA OBJECTS", TextTable::LEFT, TextTable::LEFT);
     tbl.define_column("QUOTA BYTES", TextTable::LEFT, TextTable::LEFT);
   }
@@ -1957,8 +1956,6 @@ void PGMap::dump_pool_stats(const OSDMap &osd_map, stringstream *ss,
     tbl << pool_name
         << pool_id;
     if (verbose) {
-      tbl << "-";
-
       if (pool->quota_max_objects == 0)
         tbl << "N/A";
       else
@@ -36,8 +36,8 @@ function TEST_pool_quota() {
 
     local poolname=testquoa
     ceph osd pool create $poolname 20
-    local objects=`ceph df detail | grep -w $poolname|awk '{print $4}'`
-    local bytes=`ceph df detail | grep -w $poolname|awk '{print $5}'`
+    local objects=`ceph df detail | grep -w $poolname|awk '{print $3}'`
+    local bytes=`ceph df detail | grep -w $poolname|awk '{print $4}'`
 
     echo $objects
     echo $bytes
@@ -49,8 +49,8 @@ function TEST_pool_quota() {
     ceph osd pool set-quota $poolname max_objects 1000
     ceph osd pool set-quota $poolname max_bytes 1024
 
-    objects=`ceph df detail | grep -w $poolname|awk '{print $4}'`
-    bytes=`ceph df detail | grep -w $poolname|awk '{print $5}'`
+    objects=`ceph df detail | grep -w $poolname|awk '{print $3}'`
+    bytes=`ceph df detail | grep -w $poolname|awk '{print $4}'`
 
     if [ $objects != '1000' ] || [ $bytes != '1024' ] ;
     then
@@ -505,9 +505,9 @@ class TestFS(TestArgparse):
         assert_equal({}, validate_command(sigdict, ['fs', 'ls', 'toomany']))
 
     def test_fs_set_default(self):
-        self.assert_valid_command(['fs', 'set_default', 'cephfs'])
-        assert_equal({}, validate_command(sigdict, ['fs', 'set_default']))
-        assert_equal({}, validate_command(sigdict, ['fs', 'set_default', 'cephfs', 'toomany']))
+        self.assert_valid_command(['fs', 'set-default', 'cephfs'])
+        assert_equal({}, validate_command(sigdict, ['fs', 'set-default']))
+        assert_equal({}, validate_command(sigdict, ['fs', 'set-default', 'cephfs', 'toomany']))
 
 class TestMon(TestArgparse):
 