Mirror of https://github.com/ceph/ceph, synced 2025-01-02 17:12:31 +00:00
osd,mon: change "mon_max_pg_per_osd" to uint64_t

This option should never be a negative number; if it is disabled, its value is 0.

Signed-off-by: Kefu Chai <kchai@redhat.com>
parent 4c7df944c7
commit 0d68197376
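To make the intent of the commit message concrete, here is a minimal standalone sketch (not code from this commit) of how the option is meant to behave once it is unsigned: the value is read as uint64_t, and 0, the value used when the option is disabled, switches the check off. get_opt_uint() is a hypothetical stand-in for g_conf->get_val<uint64_t>("mon_max_pg_per_osd"), and all numbers are made up.

    #include <cstdint>
    #include <iostream>

    // Hypothetical stand-in for g_conf->get_val<uint64_t>("mon_max_pg_per_osd").
    std::uint64_t get_opt_uint() {
      return 200;          // default from the options hunk below; 0 would mean "disabled"
    }

    int main() {
      const std::uint64_t max_pg_per_osd = get_opt_uint();
      const std::uint64_t num_in = 3;       // OSDs currently "in" (made-up number)
      const std::uint64_t sum_pg_up = 900;  // PGs in the "up" state (made-up number)
      if (num_in && max_pg_per_osd > 0) {   // 0 disables the check entirely
        const std::uint64_t per = sum_pg_up / num_in;
        if (per > max_pg_per_osd)
          std::cout << "too many PGs per OSD (" << per << ")\n";
      }
    }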
@@ -1052,7 +1052,7 @@ std::vector<Option> get_global_options() {
     .set_default(30)
     .set_description(""),

-    Option("mon_max_pg_per_osd", Option::TYPE_INT, Option::LEVEL_ADVANCED)
+    Option("mon_max_pg_per_osd", Option::TYPE_UINT, Option::LEVEL_ADVANCED)
     .set_default(200)
     .set_description("Max number of PGs per OSD the cluster will allow"),

@@ -5421,10 +5421,10 @@ int OSDMonitor::get_crush_rule(const string &rule_name,

 int OSDMonitor::check_pg_num(int64_t pool, int pg_num, int size, ostream *ss)
 {
-  int64_t max_pgs_per_osd = g_conf->get_val<int64_t>("mon_max_pg_per_osd");
-  int num_osds = MAX(osdmap.get_num_in_osds(), 3); // assume min cluster size 3
-  int64_t max_pgs = max_pgs_per_osd * num_osds;
-  int64_t projected = 0;
+  auto max_pgs_per_osd = g_conf->get_val<uint64_t>("mon_max_pg_per_osd");
+  auto num_osds = std::max(osdmap.get_num_in_osds(), 3u); // assume min cluster size 3
+  auto max_pgs = max_pgs_per_osd * num_osds;
+  uint64_t projected = 0;
   if (pool < 0) {
     projected += pg_num * size;
   }
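A side note on the MAX -> std::max change in this hunk, illustrated with a minimal sketch (the values are made up, only the types matter): std::max deduces a single type T for both operands, so once the OSD count is treated as unsigned the literal has to be spelled 3u; the static_cast<size_t> in the PGMap hunk below exists for the same reason.

    #include <algorithm>
    #include <cstdint>

    int main() {
      unsigned num_in_osds = 2;                  // made-up stand-in for osdmap.get_num_in_osds()
      // std::max(num_in_osds, 3) would not compile: template argument deduction
      // sees unsigned vs. int and fails, hence the 3u literal in the hunk above.
      auto num_osds = std::max(num_in_osds, 3u); // both operands are unsigned
      std::uint64_t max_pgs_per_osd = 200;       // made-up value of mon_max_pg_per_osd
      auto max_pgs = max_pgs_per_osd * num_osds; // uint64_t arithmetic, no sign conversion
      return max_pgs == 600 ? 0 : 1;
    }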
@@ -2393,12 +2393,12 @@ void PGMap::get_health_checks(
   }

   // TOO_FEW_PGS
-  int num_in = osdmap.get_num_in_osds();
-  int sum_pg_up = MAX(pg_sum.up, static_cast<int32_t>(pg_stat.size()));
+  unsigned num_in = osdmap.get_num_in_osds();
+  auto sum_pg_up = std::max(static_cast<size_t>(pg_sum.up), pg_stat.size());
   if (num_in &&
       cct->_conf->mon_pg_warn_min_per_osd > 0 &&
       osdmap.get_pools().size() > 0) {
-    int per = sum_pg_up / num_in;
+    auto per = sum_pg_up / num_in;
     if (per < cct->_conf->mon_pg_warn_min_per_osd && per) {
       ostringstream ss;
       ss << "too few PGs per OSD (" << per
@@ -2408,9 +2408,9 @@ void PGMap::get_health_checks(
   }

   // TOO_MANY_PGS
-  int64_t max_pg_per_osd = cct->_conf->get_val<int64_t>("mon_max_pg_per_osd");
+  auto max_pg_per_osd = cct->_conf->get_val<uint64_t>("mon_max_pg_per_osd");
   if (num_in && max_pg_per_osd > 0) {
-    int per = sum_pg_up / num_in;
+    auto per = sum_pg_up / num_in;
     if (per > max_pg_per_osd) {
       ostringstream ss;
       ss << "too many PGs per OSD (" << per
@@ -4169,7 +4169,7 @@ int OSD::handle_pg_peering_evt(
 bool OSD::maybe_wait_for_max_pg(spg_t pgid, bool is_mon_create)
 {
   const auto max_pgs_per_osd =
-    (cct->_conf->get_val<int64_t>("mon_max_pg_per_osd") *
+    (cct->_conf->get_val<uint64_t>("mon_max_pg_per_osd") *
     cct->_conf->get_val<double>("osd_max_pg_per_osd_hard_ratio"));

   RWLock::RLocker pg_map_locker{pg_map_lock};
@@ -4206,7 +4206,7 @@ void OSD::resume_creating_pg()
   MOSDPGTemp *pgtemp = nullptr;
   {
     const auto max_pgs_per_osd =
-      (cct->_conf->get_val<int64_t>("mon_max_pg_per_osd") *
+      (cct->_conf->get_val<uint64_t>("mon_max_pg_per_osd") *
       cct->_conf->get_val<double>("osd_max_pg_per_osd_hard_ratio"));
     RWLock::RLocker l(pg_map_lock);
     if (max_pgs_per_osd <= pg_map.size()) {
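One further observation about the two OSD hunks above: after the change, max_pgs_per_osd is a uint64_t multiplied by the double osd_max_pg_per_osd_hard_ratio, so the resulting variable is a double that is then compared against pg_map.size(). A minimal sketch with made-up values (current_pgs stands in for pg_map.size()):

    #include <cstddef>
    #include <cstdint>
    #include <type_traits>

    int main() {
      std::uint64_t mon_max_pg_per_osd = 200;   // the now-unsigned option (made-up value)
      double hard_ratio = 2.0;                  // osd_max_pg_per_osd_hard_ratio (made-up value)
      const auto max_pgs_per_osd = mon_max_pg_per_osd * hard_ratio;
      static_assert(std::is_same<decltype(max_pgs_per_osd), const double>::value,
                    "uint64_t * double promotes to double");
      std::size_t current_pgs = 500;            // stands in for pg_map.size()
      if (max_pgs_per_osd <= current_pgs) {
        // same shape as the checks in maybe_wait_for_max_pg() and resume_creating_pg():
        // the OSD would hold off on creating more PGs here
      }
      return 0;
    }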