mon/OSDMonitor: use new style options
* also mark "mon_osd_reporter_subtree_level" safe.
* change "mon_osd_min_down_reporters" to unsigned
* change "osd_pool_default_size" to unsigned
* change "osd_pool_default_min_size" to unsigned
* change "osd_pool_default_pg_num" to unsigned
* change "osd_pool_default_pgp_num" to unsigned

Signed-off-by: Kefu Chai <kchai@redhat.com>
parent 33b8797ffe
commit 582e567c93

Changed directories:
src/common
src/crush
src/mon
src/osd
src/test/fio
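
At a glance, the conversion swaps direct struct-member reads for by-name typed lookups. The sketch below is a minimal standalone illustration of the two access styles, assuming nothing about Ceph's real md_config_t beyond what the hunks here show; the Config class, its variant-backed table, and its contents are invented for the example.

// Sketch of the old vs. new access pattern. "Config" is a stand-in
// for Ceph's md_config_t; the real class is far more involved.
#include <cassert>
#include <cstdint>
#include <map>
#include <string>
#include <variant>

struct Config {
  // New style: options live in a name -> value table, typed via variant.
  std::map<std::string, std::variant<int64_t, uint64_t, std::string>> values{
      {"osd_pool_default_size", uint64_t(3)},
      {"osd_pool_default_type", std::string("replicated")},
  };

  // Typed lookup; the template argument must match the stored type,
  // mirroring how get_val<uint64_t>() pairs with Option::TYPE_UINT.
  template <typename T>
  T get_val(const std::string& name) const {
    return std::get<T>(values.at(name));
  }
};

int main() {
  Config conf;
  // Old style (removed by this commit): conf.osd_pool_default_size
  // was a plain member generated from the OPTION() macros.
  // New style: look the value up by name with an explicit type.
  auto size = conf.get_val<uint64_t>("osd_pool_default_size");
  assert(size == 3);
  assert(conf.get_val<std::string>("osd_pool_default_type") == "replicated");
}
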
src/common/config.h
@@ -304,9 +304,9 @@ public:
 public:
   unsigned get_osd_pool_default_min_size() const {
-    return osd_pool_default_min_size ?
-      MIN(osd_pool_default_min_size, osd_pool_default_size) :
-      osd_pool_default_size - osd_pool_default_size / 2;
+    auto min_size = get_val<uint64_t>("osd_pool_default_min_size");
+    auto size = get_val<uint64_t>("osd_pool_default_size");
+    return min_size ? std::min(min_size, size) : (size - size / 2);
   }

   /** A lock that protects the md_config_t internals. It is
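
For a quick sanity check of the fallback rule in the rewritten helper above (the "0 means use size - size/2" behavior the option documents): with the stock size of 3, an unset min_size yields 2. A standalone version of the same arithmetic:

#include <algorithm>
#include <cassert>
#include <cstdint>

// Same logic as the rewritten get_osd_pool_default_min_size(),
// lifted out as a free function for illustration.
uint64_t default_min_size(uint64_t min_size, uint64_t size) {
  return min_size ? std::min(min_size, size) : (size - size / 2);
}

int main() {
  assert(default_min_size(0, 3) == 2);  // unset: 3 - 3/2 = 2
  assert(default_min_size(0, 2) == 1);  // unset: 2 - 2/2 = 1
  assert(default_min_size(2, 3) == 2);  // explicit value wins
  assert(default_min_size(5, 3) == 3);  // but is clamped to size
}
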
src/common/config_opts.h
@@ -290,8 +290,6 @@ OPTION(mon_sync_timeout, OPT_DOUBLE)
 OPTION(mon_sync_max_payload_size, OPT_U32) // max size for a sync chunk payload (say)
 OPTION(mon_sync_debug, OPT_BOOL) // enable sync-specific debug
 OPTION(mon_inject_sync_get_chunk_delay, OPT_DOUBLE)  // inject N second delay on each get_chunk request
-OPTION(mon_osd_min_down_reporters, OPT_INT) // number of OSDs from different subtrees who need to report a down OSD for it to count
-OPTION(mon_osd_reporter_subtree_level , OPT_STR) // in which level of parent bucket the reporters are counted
 OPTION(mon_osd_force_trim_to, OPT_INT) // force mon to trim maps to this point, regardless of min_last_epoch_clean (dangerous)
 OPTION(mon_mds_force_trim_to, OPT_INT) // force mon to trim mdsmaps to this point (dangerous)
 OPTION(mon_mds_skip_sanity, OPT_BOOL) // skip safety assertions on FSMap (in case of bugs where we want to continue anyway)
@@ -607,14 +605,7 @@ OPTION(osd_pool_use_gmt_hitset, OPT_BOOL) // try to use gmt for hitset archive n
 OPTION(osd_crush_update_on_start, OPT_BOOL)
 OPTION(osd_class_update_on_start, OPT_BOOL) // automatically set device class on start
 OPTION(osd_crush_initial_weight, OPT_DOUBLE) // if >=0, the initial weight is for newly added osds.
-OPTION(osd_pool_default_crush_rule, OPT_INT)
 OPTION(osd_pool_erasure_code_stripe_unit, OPT_U32) // in bytes
-OPTION(osd_pool_default_size, OPT_INT)
-OPTION(osd_pool_default_min_size, OPT_INT) // 0 means no specific default; ceph will use size-size/2
-OPTION(osd_pool_default_pg_num, OPT_INT) // number of PGs for new pools. Configure in global or mon section of ceph.conf
-OPTION(osd_pool_default_pgp_num, OPT_INT) // number of PGs for placement purposes. Should be equal to pg_num
-OPTION(osd_pool_default_type, OPT_STR)
-OPTION(osd_pool_default_erasure_code_profile, OPT_STR) // default properties of osd pool create
 OPTION(osd_erasure_code_plugins, OPT_STR) // list of erasure code plugins

 // Allows the "peered" state for recovery and backfill below min_size
src/common/options.cc
@@ -1287,13 +1287,17 @@ std::vector<Option> get_global_options() {
     .set_default(0)
     .set_description(""),

-    Option("mon_osd_min_down_reporters", Option::TYPE_INT, Option::LEVEL_ADVANCED)
+    Option("mon_osd_min_down_reporters", Option::TYPE_UINT, Option::LEVEL_ADVANCED)
     .set_default(2)
-    .set_description(""),
+    .add_service("mon")
+    .set_description("number of OSDs from different subtrees who need to report a down OSD for it to count")
+    .add_see_also("mon_osd_reporter_subtree_level"),

     Option("mon_osd_reporter_subtree_level", Option::TYPE_STR, Option::LEVEL_ADVANCED)
     .set_default("host")
-    .set_description(""),
+    .add_service("mon")
+    .set_safe()
+    .set_description("in which level of parent bucket the reporters are counted"),

     Option("mon_osd_force_trim_to", Option::TYPE_INT, Option::LEVEL_ADVANCED)
     .set_default(0)
@@ -1705,29 +1709,37 @@ std::vector<Option> get_global_options() {
     .set_default(4_K)
     .set_description(""),

-    Option("osd_pool_default_size", Option::TYPE_INT, Option::LEVEL_ADVANCED)
+    Option("osd_pool_default_size", Option::TYPE_UINT, Option::LEVEL_ADVANCED)
     .set_default(3)
-    .set_description(""),
+    .set_description("the number of copies of an object")
+    .add_service("mon"),

-    Option("osd_pool_default_min_size", Option::TYPE_INT, Option::LEVEL_ADVANCED)
+    Option("osd_pool_default_min_size", Option::TYPE_UINT, Option::LEVEL_ADVANCED)
     .set_default(0)
-    .set_description(""),
+    .set_description("the minimal number of copies allowed to write to a degraded pool")
+    .set_long_description("0 means no specific default; ceph will use size-size/2")
+    .add_see_also("osd_pool_default_size")
+    .add_service("mon"),

-    Option("osd_pool_default_pg_num", Option::TYPE_INT, Option::LEVEL_ADVANCED)
+    Option("osd_pool_default_pg_num", Option::TYPE_UINT, Option::LEVEL_ADVANCED)
     .set_default(8)
-    .set_description(""),
+    .set_description("number of PGs for new pools. Configure in global or mon section of ceph.conf")
+    .add_service("mon"),

-    Option("osd_pool_default_pgp_num", Option::TYPE_INT, Option::LEVEL_ADVANCED)
+    Option("osd_pool_default_pgp_num", Option::TYPE_UINT, Option::LEVEL_ADVANCED)
     .set_default(8)
-    .set_description(""),
+    .set_description("number of PGs for placement purposes. Should be equal to pg_num")
+    .add_see_also("osd_pool_default_pg_num")
+    .add_service("mon"),

     Option("osd_pool_default_type", Option::TYPE_STR, Option::LEVEL_ADVANCED)
     .set_default("replicated")
     .set_enum_allowed({"replicated", "erasure"})
     .set_description(""),

     Option("osd_pool_default_erasure_code_profile", Option::TYPE_STR, Option::LEVEL_ADVANCED)
     .set_default("plugin=jerasure technique=reed_sol_van k=2 m=1")
-    .set_description(""),
+    .set_description("default properties of osd pool create"),

     Option("osd_erasure_code_plugins", Option::TYPE_STR, Option::LEVEL_ADVANCED)
     .set_default("jerasure lrc"
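
A pattern worth noting across the call sites below: the template argument passed to get_val<T>() follows the declared option type — uint64_t for the options moved to TYPE_UINT, int64_t for TYPE_INT (osd_pool_default_crush_rule), std::string for TYPE_STR. The following standalone sketch shows why the pairing matters, using std::variant as a stand-in for the option's stored value (an assumption for illustration, not Ceph's actual storage):

#include <cstdint>
#include <iostream>
#include <string>
#include <variant>

// Asking a variant-backed store for the wrong alternative fails at
// runtime, which is why each get_val<T>() call in this commit names
// the type the option was declared with.
int main() {
  std::variant<int64_t, uint64_t, std::string> v = uint64_t(3); // TYPE_UINT
  std::cout << std::get<uint64_t>(v) << "\n";   // ok: matches TYPE_UINT
  try {
    std::cout << std::get<int64_t>(v) << "\n";  // wrong type for this option
  } catch (const std::bad_variant_access&) {
    std::cout << "type mismatch\n";
  }
}
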
src/crush/CrushWrapper.cc
@@ -2994,7 +2994,7 @@ void CrushWrapper::generate_test_instances(list<CrushWrapper*>& o)
  */
 int CrushWrapper::get_osd_pool_default_crush_replicated_ruleset(CephContext *cct)
 {
-  int crush_ruleset = cct->_conf->osd_pool_default_crush_rule;
+  int crush_ruleset = cct->_conf->get_val<int64_t>("osd_pool_default_crush_rule");
   if (crush_ruleset < 0) {
     crush_ruleset = find_first_ruleset(pg_pool_t::TYPE_REPLICATED);
   } else if (!ruleset_exists(crush_ruleset)) {
src/mon/OSDMonitor.cc
@@ -1884,7 +1884,7 @@ bool OSDMonitor::check_failure(utime_t now, int target_osd, failure_info_t& fi)
   }

   set<string> reporters_by_subtree;
-  string reporter_subtree_level = g_conf->mon_osd_reporter_subtree_level;
+  auto reporter_subtree_level = g_conf->get_val<string>("mon_osd_reporter_subtree_level");
   utime_t orig_grace(g_conf->osd_heartbeat_grace, 0);
   utime_t max_failed_since = fi.get_failed_since();
   utime_t failed_for = now - max_failed_since;
@@ -1944,7 +1944,7 @@ bool OSDMonitor::check_failure(utime_t now, int target_osd, failure_info_t& fi)
           << dendl;

   if (failed_for >= grace &&
-      (int)reporters_by_subtree.size() >= g_conf->mon_osd_min_down_reporters) {
+      reporters_by_subtree.size() >= g_conf->get_val<uint64_t>("mon_osd_min_down_reporters")) {
     dout(1) << " we have enough reporters to mark osd." << target_osd
             << " down" << dendl;
     pending_inc.new_state[target_osd] = CEPH_OSD_UP;
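
Note the dropped (int) cast in the hunk above: with mon_osd_min_down_reporters now unsigned, set::size() (a size_t) compares against the get_val<uint64_t>() result without a sign mismatch. A minimal standalone illustration of the before/after shape (values are hypothetical, nothing Ceph-specific):

#include <cstdint>
#include <set>
#include <string>

int main() {
  std::set<std::string> reporters{"host-a", "host-b"};

  // Old shape: the option was a signed int, so the size_t result of
  // size() was cast to int to keep the comparison sign-consistent.
  int min_reporters_signed = 2;
  bool enough_old = (int)reporters.size() >= min_reporters_signed;

  // New shape: the option is unsigned, both operands are unsigned,
  // and no cast is needed.
  uint64_t min_reporters = 2;
  bool enough_new = reporters.size() >= min_reporters;

  return (enough_old && enough_new) ? 0 : 1;
}
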
@@ -5222,7 +5222,7 @@ int OSDMonitor::parse_erasure_code_profile(const vector<string> &erasure_code_pr
                                            map<string,string> *erasure_code_profile_map,
                                            ostream *ss)
 {
-  int r = get_json_str_map(g_conf->osd_pool_default_erasure_code_profile,
+  int r = get_json_str_map(g_conf->get_val<string>("osd_pool_default_erasure_code_profile"),
                            *ss,
                            erasure_code_profile_map);
   if (r)
@@ -5265,8 +5265,8 @@ int OSDMonitor::prepare_pool_size(const unsigned pool_type,
   int err = 0;
   switch (pool_type) {
   case pg_pool_t::TYPE_REPLICATED:
-    *size = g_conf->osd_pool_default_size;
-    *min_size = g_conf->get_osd_pool_default_min_size();
+    *size = g_conf->get_val<uint64_t>("osd_pool_default_size");
+    *min_size = g_conf->get_val<uint64_t>("osd_pool_default_min_size");
     break;
   case pg_pool_t::TYPE_ERASURE:
     {
@@ -5473,9 +5473,9 @@ int OSDMonitor::prepare_new_pool(string& name, uint64_t auid,
   if (name.length() == 0)
     return -EINVAL;
   if (pg_num == 0)
-    pg_num = g_conf->osd_pool_default_pg_num;
+    pg_num = g_conf->get_val<uint64_t>("osd_pool_default_pg_num");
   if (pgp_num == 0)
-    pgp_num = g_conf->osd_pool_default_pgp_num;
+    pgp_num = g_conf->get_val<uint64_t>("osd_pool_default_pgp_num");
   if (pg_num > (unsigned)g_conf->mon_max_pool_pg_num) {
     *ss << "'pg_num' must be greater than 0 and less than or equal to "
         << g_conf->mon_max_pool_pg_num
@@ -9900,7 +9900,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op,
     string pool_type_str;
     cmd_getval(cct, cmdmap, "pool_type", pool_type_str);
     if (pool_type_str.empty())
-      pool_type_str = g_conf->osd_pool_default_type;
+      pool_type_str = g_conf->get_val<string>("osd_pool_default_type");

     string poolstr;
     cmd_getval(cct, cmdmap, "pool", poolstr);
src/osd/OSDMap.cc
@@ -3364,7 +3364,7 @@ int OSDMap::build_simple_optioned(CephContext *cct, epoch_t e, uuid_d &fsid,
     pools[pool].set_flag(pg_pool_t::FLAG_NOPGCHANGE);
   if (cct->_conf->osd_pool_default_flag_nosizechange)
     pools[pool].set_flag(pg_pool_t::FLAG_NOSIZECHANGE);
-  pools[pool].size = cct->_conf->osd_pool_default_size;
+  pools[pool].size = cct->_conf->get_val<uint64_t>("osd_pool_default_size");
   pools[pool].min_size = cct->_conf->get_osd_pool_default_min_size();
   pools[pool].crush_rule = default_replicated_rule;
   pools[pool].object_hash = CEPH_STR_HASH_RJENKINS;
@@ -3397,7 +3397,7 @@ int OSDMap::get_erasure_code_profile_default(CephContext *cct,
                                              map<string,string> &profile_map,
                                              ostream *ss)
 {
-  int r = get_json_str_map(cct->_conf->osd_pool_default_erasure_code_profile,
+  int r = get_json_str_map(cct->_conf->get_val<string>("osd_pool_default_erasure_code_profile"),
                            *ss,
                            &profile_map);
   return r;
src/test/fio/fio_ceph_objectstore.cc
@@ -207,7 +207,7 @@ Job::Job(Engine* engine, const thread_data* td)
   const uint64_t pool = Collection::MIN_POOL_ID + td->thread_number;

   // create a collection for each object, up to osd_pool_default_pg_num
-  uint32_t count = g_conf->osd_pool_default_pg_num;
+  uint32_t count = g_conf->get_val<uint64_t>("osd_pool_default_pg_num");
   if (count > td->o.nr_files)
     count = td->o.nr_files;