mirror of
https://github.com/ceph/ceph
synced 2025-01-29 14:34:40 +00:00
Merge pull request #14490 from liewegas/wip-no-luminous
mon: add mon_debug_no_require_luminous Reviewed-by: Kefu Chai <kchai@redhat.com>
This commit is contained in:
commit
d82ac99d70
1
qa/suites/rados/basic/z-require-luminous
Symbolic link
1
qa/suites/rados/basic/z-require-luminous
Symbolic link
@ -0,0 +1 @@
|
||||
../thrash/z-require-luminous
|
1
qa/suites/rados/monthrash/z-require-luminous
Symbolic link
1
qa/suites/rados/monthrash/z-require-luminous
Symbolic link
@ -0,0 +1 @@
|
||||
../thrash/z-require-luminous
|
1
qa/suites/rados/multimon/z-require-luminous
Symbolic link
1
qa/suites/rados/multimon/z-require-luminous
Symbolic link
@ -0,0 +1 @@
|
||||
../thrash/z-require-luminous
|
1
qa/suites/rados/thrash-erasure-code/z-require-luminous
Symbolic link
1
qa/suites/rados/thrash-erasure-code/z-require-luminous
Symbolic link
@ -0,0 +1 @@
|
||||
../thrash/z-require-luminous
|
14
qa/suites/rados/thrash/z-require-luminous/at-end.yaml
Normal file
14
qa/suites/rados/thrash/z-require-luminous/at-end.yaml
Normal file
@ -0,0 +1,14 @@
|
||||
# do not require luminous osds at mkfs time; only set flag at
# the end of the test run, then do a final scrub (to convert any
# legacy snapsets), and verify we are healthy.
tasks:
- exec_on_cleanup:
    mon.a:
      - ceph osd set require_luminous_osds
overrides:
  ceph:
    conf:
      global:
        # keep CEPH_OSDMAP_REQUIRE_LUMINOUS unset at mkfs so the
        # flag-setting path above is actually exercised
        mon debug no require luminous: true
  thrashosds:
    # cluster-full thrashing relies on the luminous full-ratio
    # handling, which is disabled until the flag is set at cleanup
    chance_thrash_cluster_full: 0
|
1
qa/suites/rados/verify/z-require-luminous
Symbolic link
1
qa/suites/rados/verify/z-require-luminous
Symbolic link
@ -0,0 +1 @@
|
||||
../thrash/z-require-luminous
|
62
qa/tasks/exec_on_cleanup.py
Normal file
62
qa/tasks/exec_on_cleanup.py
Normal file
@ -0,0 +1,62 @@
|
||||
"""
|
||||
Exececute custom commands during unwind/cleanup
|
||||
"""
|
||||
import logging
|
||||
import contextlib
|
||||
|
||||
from teuthology import misc as teuthology
|
||||
from teuthology import contextutil
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def task(ctx, config):
|
||||
"""
|
||||
Execute commands on a given role
|
||||
|
||||
tasks:
|
||||
- ceph:
|
||||
- kclient: [client.a]
|
||||
- exec:
|
||||
client.a:
|
||||
- "echo 'module libceph +p' > /sys/kernel/debug/dynamic_debug/control"
|
||||
- "echo 'module ceph +p' > /sys/kernel/debug/dynamic_debug/control"
|
||||
- interactive:
|
||||
|
||||
It stops and fails with the first command that does not return on success. It means
|
||||
that if the first command fails, the second won't run at all.
|
||||
|
||||
To avoid confusion it is recommended to explicitly enclose the commands in
|
||||
double quotes. For instance if the command is false (without double quotes) it will
|
||||
be interpreted as a boolean by the YAML parser.
|
||||
|
||||
:param ctx: Context
|
||||
:param config: Configuration
|
||||
"""
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
log.info('Executing custom commands...')
|
||||
assert isinstance(config, dict), "task exec got invalid config"
|
||||
|
||||
testdir = teuthology.get_testdir(ctx)
|
||||
|
||||
if 'all' in config and len(config) == 1:
|
||||
a = config['all']
|
||||
roles = teuthology.all_roles(ctx.cluster)
|
||||
config = dict((id_, a) for id_ in roles)
|
||||
|
||||
for role, ls in config.iteritems():
|
||||
(remote,) = ctx.cluster.only(role).remotes.iterkeys()
|
||||
log.info('Running commands on role %s host %s', role, remote.name)
|
||||
for c in ls:
|
||||
c.replace('$TESTDIR', testdir)
|
||||
remote.run(
|
||||
args=[
|
||||
'sudo',
|
||||
'TESTDIR={tdir}'.format(tdir=testdir),
|
||||
'bash',
|
||||
'-c',
|
||||
c],
|
||||
)
|
||||
|
@ -370,6 +370,7 @@ OPTION(mon_debug_deprecated_as_obsolete, OPT_BOOL, false) // consider deprecated
|
||||
OPTION(mon_debug_dump_transactions, OPT_BOOL, false)
|
||||
OPTION(mon_debug_dump_json, OPT_BOOL, false)
|
||||
OPTION(mon_debug_dump_location, OPT_STR, "/var/log/ceph/$cluster-$name.tdump")
|
||||
OPTION(mon_debug_no_require_luminous, OPT_BOOL, false)
|
||||
OPTION(mon_inject_transaction_delay_max, OPT_DOUBLE, 10.0) // seconds
|
||||
OPTION(mon_inject_transaction_delay_probability, OPT_DOUBLE, 0) // range [0, 1]
|
||||
|
||||
|
@ -161,10 +161,11 @@ void OSDMonitor::create_initial()
|
||||
// new cluster should require latest by default
|
||||
newmap.set_flag(CEPH_OSDMAP_REQUIRE_JEWEL);
|
||||
newmap.set_flag(CEPH_OSDMAP_REQUIRE_KRAKEN);
|
||||
newmap.set_flag(CEPH_OSDMAP_REQUIRE_LUMINOUS);
|
||||
|
||||
newmap.full_ratio = g_conf->mon_osd_full_ratio;
|
||||
newmap.nearfull_ratio = g_conf->mon_osd_nearfull_ratio;
|
||||
if (!g_conf->mon_debug_no_require_luminous) {
|
||||
newmap.set_flag(CEPH_OSDMAP_REQUIRE_LUMINOUS);
|
||||
newmap.full_ratio = g_conf->mon_osd_full_ratio;
|
||||
newmap.nearfull_ratio = g_conf->mon_osd_nearfull_ratio;
|
||||
}
|
||||
|
||||
// encode into pending incremental
|
||||
newmap.encode(pending_inc.fullmap,
|
||||
@ -249,7 +250,9 @@ void OSDMonitor::update_from_paxos(bool *need_bootstrap)
|
||||
auto p = bl.begin();
|
||||
std::lock_guard<std::mutex> l(creating_pgs_lock);
|
||||
creating_pgs.decode(p);
|
||||
dout(7) << __func__ << " loading creating_pgs e" << creating_pgs.last_scan_epoch << dendl;
|
||||
dout(7) << __func__ << " loading creating_pgs last_scan_epoch "
|
||||
<< creating_pgs.last_scan_epoch
|
||||
<< " with " << creating_pgs.pgs.size() << " pgs" << dendl;
|
||||
}
|
||||
|
||||
// walk through incrementals
|
||||
@ -1179,11 +1182,17 @@ void OSDMonitor::encode_pending(MonitorDBStore::TransactionRef t)
|
||||
void OSDMonitor::trim_creating_pgs(creating_pgs_t* creating_pgs,
|
||||
const PGMap& pgm)
|
||||
{
|
||||
for (auto& pg : pgm.pg_stat) {
|
||||
auto created = creating_pgs->pgs.find(pg.first);
|
||||
if (created != creating_pgs->pgs.end()) {
|
||||
creating_pgs->pgs.erase(created);
|
||||
creating_pgs->created_pools.insert(pg.first.pool());
|
||||
auto p = creating_pgs->pgs.begin();
|
||||
while (p != creating_pgs->pgs.end()) {
|
||||
auto q = pgm.pg_stat.find(p->first);
|
||||
if (q != pgm.pg_stat.end() &&
|
||||
!(q->second.state & PG_STATE_CREATING)) {
|
||||
dout(20) << __func__ << " pgmap shows " << p->first << " is created"
|
||||
<< dendl;
|
||||
p = creating_pgs->pgs.erase(p);
|
||||
creating_pgs->created_pools.insert(q->first.pool());
|
||||
} else {
|
||||
++p;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -3459,7 +3468,9 @@ void OSDMonitor::get_health(list<pair<health_status_t,string> >& summary,
|
||||
}
|
||||
|
||||
// warn about upgrade flags that can be set but are not.
|
||||
if (HAVE_FEATURE(osdmap.get_up_osd_features(), SERVER_LUMINOUS) &&
|
||||
if (g_conf->mon_debug_no_require_luminous) {
|
||||
// ignore these checks
|
||||
} else if (HAVE_FEATURE(osdmap.get_up_osd_features(), SERVER_LUMINOUS) &&
|
||||
!osdmap.test_flag(CEPH_OSDMAP_REQUIRE_LUMINOUS)) {
|
||||
string msg = "all OSDs are running luminous or later but the"
|
||||
" 'require_luminous_osds' osdmap flag is not set";
|
||||
@ -7238,6 +7249,11 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op,
|
||||
err = -EPERM;
|
||||
goto reply;
|
||||
}
|
||||
if (!osdmap.test_flag(CEPH_OSDMAP_REQUIRE_LUMINOUS)) {
|
||||
ss << "you must set the require_luminous_osds flag to use this feature";
|
||||
err = -EPERM;
|
||||
goto reply;
|
||||
}
|
||||
err = check_cluster_features(CEPH_FEATUREMASK_OSDMAP_REMAP, ss);
|
||||
if (err == -EAGAIN)
|
||||
goto wait;
|
||||
@ -7293,6 +7309,11 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op,
|
||||
err = -EPERM;
|
||||
goto reply;
|
||||
}
|
||||
if (!osdmap.test_flag(CEPH_OSDMAP_REQUIRE_LUMINOUS)) {
|
||||
ss << "you must set the require_luminous_osds flag to use this feature";
|
||||
err = -EPERM;
|
||||
goto reply;
|
||||
}
|
||||
err = check_cluster_features(CEPH_FEATUREMASK_OSDMAP_REMAP, ss);
|
||||
if (err == -EAGAIN)
|
||||
goto wait;
|
||||
@ -7332,6 +7353,11 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op,
|
||||
err = -EPERM;
|
||||
goto reply;
|
||||
}
|
||||
if (!osdmap.test_flag(CEPH_OSDMAP_REQUIRE_LUMINOUS)) {
|
||||
ss << "you must set the require_luminous_osds flag to use this feature";
|
||||
err = -EPERM;
|
||||
goto reply;
|
||||
}
|
||||
err = check_cluster_features(CEPH_FEATUREMASK_OSDMAP_REMAP, ss);
|
||||
if (err == -EAGAIN)
|
||||
goto wait;
|
||||
@ -7399,6 +7425,11 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op,
|
||||
err = -EPERM;
|
||||
goto reply;
|
||||
}
|
||||
if (!osdmap.test_flag(CEPH_OSDMAP_REQUIRE_LUMINOUS)) {
|
||||
ss << "you must set the require_luminous_osds flag to use this feature";
|
||||
err = -EPERM;
|
||||
goto reply;
|
||||
}
|
||||
err = check_cluster_features(CEPH_FEATUREMASK_OSDMAP_REMAP, ss);
|
||||
if (err == -EAGAIN)
|
||||
goto wait;
|
||||
|
Loading…
Reference in New Issue
Block a user