
mgr/DaemonServer: safe-to-destroy - do not consider irrelevant pgs

Otherwise "ceph osd safe-to-destroy" would say NO even when we want to
destroy a down+out osd whose relevant pools are all actually active+clean.

Signed-off-by: xie xingguo <xie.xingguo@zte.com.cn>
Author: xie xingguo <xie.xingguo@zte.com.cn>
Date:   2019-05-05 13:17:32 +08:00
Parent: 30a8d911eb
Commit: ba8dd78236
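
For illustration only (not part of the commit): a minimal, self-contained C++ sketch of the check the patch adds. The PG_STATE_* bit values, the PgStat struct, and all_relevant_pgs_active_clean() below are simplified stand-ins for Ceph's real definitions in src/osd/osd_types.h and DaemonServer.cc; only the pool-filtering and bitmask logic mirrors the diff.

// Sketch only: simplified stand-ins for Ceph's PG state flag bits
// (the real values live in src/osd/osd_types.h).
#include <cstdint>
#include <iostream>
#include <set>
#include <vector>

constexpr uint64_t PG_STATE_ACTIVE = 1ull << 0;
constexpr uint64_t PG_STATE_CLEAN  = 1ull << 1;

// Hypothetical, stripped-down PG stat: the real pg_t also carries a
// placement seed; here a PG is identified only by its pool id.
struct PgStat {
  int64_t pool;
  uint64_t state;   // bitmask of PG_STATE_* flags
};

// True when every PG in one of `pools` (the pools that map to the
// OSD being destroyed) is both active and clean. PGs in other pools
// are ignored -- the behavior this commit introduces.
bool all_relevant_pgs_active_clean(const std::set<int64_t>& pools,
                                   const std::vector<PgStat>& pg_stats) {
  constexpr uint64_t want = PG_STATE_ACTIVE | PG_STATE_CLEAN;
  for (const auto& ps : pg_stats) {
    if (!pools.count(ps.pool))
      continue;                   // irrelevant pool: skip
    if ((ps.state & want) != want)
      return false;               // a relevant PG is not active+clean
  }
  return true;
}

int main() {
  // A down+out OSD that only serves pool 1, which is active+clean.
  // Pool 2 is still unhealthy, but that must not block the destroy.
  std::set<int64_t> pools_of_osd = {1};
  std::vector<PgStat> pg_stats = {
    {1, PG_STATE_ACTIVE | PG_STATE_CLEAN},
    {2, PG_STATE_ACTIVE},         // active but not clean: irrelevant here
  };
  std::cout << std::boolalpha
            << all_relevant_pgs_active_clean(pools_of_osd, pg_stats)
            << "\n";              // prints: true
}

Note that the mask test requires both bits, so a PG that is active but not yet clean (or clean but not active) still blocks the destroy, while PGs in pools the OSD does not serve are skipped entirely; that skip is the whole point of the fix.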

@@ -1349,6 +1349,33 @@ bool DaemonServer::_handle_command(
       safe_to_destroy.insert(osd);
       continue; // clearly safe to destroy
     }
+    set<int64_t> pools;
+    osdmap.get_pool_ids_by_osd(g_ceph_context, osd, &pools);
+    if (pools.empty()) {
+      // osd does not belong to any pools yet
+      safe_to_destroy.insert(osd);
+      continue;
+    }
+    if (osdmap.is_down(osd) && osdmap.is_out(osd)) {
+      // if osd is down&out and all relevant pools are active+clean,
+      // then should be safe to destroy
+      bool all_osd_pools_active_clean = true;
+      for (auto& ps : pg_map.pg_stat) {
+        auto& pg = ps.first;
+        auto state = ps.second.state;
+        if (!pools.count(pg.pool()))
+          continue;
+        if ((state & (PG_STATE_ACTIVE | PG_STATE_CLEAN)) !=
+            (PG_STATE_ACTIVE | PG_STATE_CLEAN)) {
+          all_osd_pools_active_clean = false;
+          break;
+        }
+      }
+      if (all_osd_pools_active_clean) {
+        safe_to_destroy.insert(osd);
+        continue;
+      }
+    }
     auto q = pg_map.num_pg_by_osd.find(osd);
     if (q != pg_map.num_pg_by_osd.end()) {
       if (q->second.acting > 0 || q->second.up > 0) {