qa/tasks/ceph: provide configuration for setting configs via mon

These configs may be set using:

ceph:
  cluster-conf:
    entity:
      foo: bar

same as the current:

ceph:
  conf:
    entity:
      foo: bar

The configs will be set in parallel using the `ceph config set` command.
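
For the example above, conf_setup boils down to running something like the
following (assuming the default cluster name "ceph"):

  sudo ceph --cluster ceph config set entity foo bar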

The main benefit here is to avoid using ceph.conf to set configs, which
cannot be overridden by a subsequent `ceph config` command. The only ways to
override such a setting are to change the ceph.conf in the test (yuck) or to
use the admin socket (which gets reset when the daemon restarts).

Finally, we can now exploit the `ceph config reset` command, which lets us
trivially roll back config changes after a test completes. The config log
version to reset to is exposed as the `ctx.conf_epoch` variable.
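
For instance, a cleanup step could revert config changes made while the test
ran with a command like (hypothetical usage; this commit only records the
epoch):

  ceph config reset <conf_epoch>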

Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
Patrick Donnelly 2023-08-07 21:09:21 -04:00
parent ddc57388ce
commit 9d485ae1f4
4 changed files with 132 additions and 0 deletions


@@ -360,6 +360,68 @@ def crush_setup(ctx, config):
    yield

@contextlib.contextmanager
def module_setup(ctx, config):
    cluster_name = config['cluster']
    first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
    (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()
    modules = config.get('mgr-modules', [])
    for m in modules:
        m = str(m)
        cmd = [
            'sudo',
            'ceph',
            '--cluster',
            cluster_name,
            'mgr',
            'module',
            'enable',
            m,
        ]
        log.info("enabling module %s", m)
        mon_remote.run(args=cmd)
    yield

@contextlib.contextmanager
def conf_setup(ctx, config):
    cluster_name = config['cluster']
    first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
    (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()
    configs = config.get('cluster-conf', {})
    procs = []
    for section, confs in configs.items():
        section = str(section)
        for k, v in confs.items():
            k = str(k).replace(' ', '_')  # pre-pacific compatibility
            v = str(v)
            cmd = [
                'sudo',
                'ceph',
                '--cluster',
                cluster_name,
                'config',
                'set',
                section,
                k,
                v,
            ]
            log.info("setting config [%s] %s = %s", section, k, v)
            procs.append(mon_remote.run(args=cmd, wait=False))
    log.debug("set %d configs", len(procs))
    for p in procs:
        log.debug("waiting for %s", p)
        p.wait()
    yield

@contextlib.contextmanager
def conf_epoch(ctx, config):
    cm = ctx.managers[config['cluster']]
    cm.save_conf_epoch()
    yield

@contextlib.contextmanager
def check_enable_crimson(ctx, config):
    # enable crimson-osds if crimson
@@ -1899,7 +1961,9 @@ def task(ctx, config):
            mon_bind_addrvec=config.get('mon_bind_addrvec', True),
        )),
        lambda: run_daemon(ctx=ctx, config=config, type_='mon'),
        lambda: module_setup(ctx=ctx, config=config),
        lambda: run_daemon(ctx=ctx, config=config, type_='mgr'),
        lambda: conf_setup(ctx=ctx, config=config),
        lambda: crush_setup(ctx=ctx, config=config),
        lambda: check_enable_crimson(ctx=ctx, config=config),
        lambda: run_daemon(ctx=ctx, config=config, type_='osd'),

@@ -1908,6 +1972,7 @@ def task(ctx, config):
        lambda: run_daemon(ctx=ctx, config=config, type_='mds'),
        lambda: cephfs_setup(ctx=ctx, config=config),
        lambda: watchdog_setup(ctx=ctx, config=config),
        lambda: conf_epoch(ctx=ctx, config=config),
    ]

    with contextutil.nested(*subtasks):
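
For illustration, a teuthology job fragment exercising the new keys might
look like the following (the module name and config value here are made up
for the example):

  tasks:
  - ceph:
      mgr-modules:
        - stats
      cluster-conf:
        mds:
          mds_cache_memory_limit: 4294967296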


@@ -1599,6 +1599,11 @@ class CephManager:
        timeout = kwargs.pop('timeout', 120)
        return ['sudo'] + self.pre + ['timeout', f'{timeout}', 'ceph',
                                      '--cluster', self.cluster]

    def save_conf_epoch(self):
        p = self.ceph("config log 1 --format=json")
        J = json.loads(p.stdout.getvalue())
        self.ctx.conf_epoch = J[0]["version"]
        log.info("config epoch is %d", self.ctx.conf_epoch)

    def ceph(self, cmd, **kwargs):
        """


@@ -1747,6 +1747,63 @@ def crush_setup(ctx, config):
    yield

@contextlib.contextmanager
def module_setup(ctx, config):
    cluster_name = config['cluster']
    remote = ctx.ceph[cluster_name].bootstrap_remote
    modules = config.get('mgr-modules', [])
    for m in modules:
        m = str(m)
        cmd = [
            'sudo',
            'ceph',
            '--cluster',
            cluster_name,
            'mgr',
            'module',
            'enable',
            m,
        ]
        log.info("enabling module %s", m)
        _shell(ctx, cluster_name, remote, args=cmd)
    yield

@contextlib.contextmanager
def conf_setup(ctx, config):
    cluster_name = config['cluster']
    remote = ctx.ceph[cluster_name].bootstrap_remote
    configs = config.get('cluster-conf', {})
    procs = []
    for section, confs in configs.items():
        section = str(section)
        for k, v in confs.items():
            k = str(k).replace(' ', '_')  # pre-pacific compatibility
            v = str(v)
            cmd = [
                'ceph',
                'config',
                'set',
                section,
                k,
                v,
            ]
            log.info("setting config [%s] %s = %s", section, k, v)
            procs.append(_shell(ctx, cluster_name, remote, args=cmd, wait=False))
    log.debug("set %d configs", len(procs))
    for p in procs:
        log.debug("waiting for %s", p)
        p.wait()
    yield

@contextlib.contextmanager
def conf_epoch(ctx, config):
    cm = ctx.managers[config['cluster']]
    cm.save_conf_epoch()
    yield

@contextlib.contextmanager
def create_rbd_pool(ctx, config):
    if config.get('create_rbd_pool', False):
@@ -2249,7 +2306,9 @@ def task(ctx, config):
            lambda: crush_setup(ctx=ctx, config=config),
            lambda: ceph_mons(ctx=ctx, config=config),
            lambda: distribute_config_and_admin_keyring(ctx=ctx, config=config),
            lambda: module_setup(ctx=ctx, config=config),
            lambda: ceph_mgrs(ctx=ctx, config=config),
            lambda: conf_setup(ctx=ctx, config=config),
            lambda: ceph_osds(ctx=ctx, config=config),
            lambda: ceph_mdss(ctx=ctx, config=config),
            lambda: cephfs_setup(ctx=ctx, config=config),

@@ -2261,6 +2320,7 @@ def task(ctx, config):
            lambda: ceph_monitoring('grafana', ctx=ctx, config=config),
            lambda: ceph_clients(ctx=ctx, config=config),
            lambda: create_rbd_pool(ctx=ctx, config=config),
            lambda: conf_epoch(ctx=ctx, config=config),
    ):
        try:
            if config.get('wait-for-healthy', True):


@@ -820,6 +820,8 @@ class LocalCephManager(CephManager):
        self.testdir = None
        self.RADOS_CMD = [RADOS_CMD]
        self.save_conf_epoch()

    def get_ceph_cmd(self, **kwargs):
        return [CEPH_CMD]
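
As a sketch of how the recorded epoch could be consumed (a hypothetical
helper, not part of this commit), a teardown context manager in the task
modules could revert config changes made by a test via CephManager.ceph():

  import contextlib

  @contextlib.contextmanager
  def conf_reset(ctx, config):
      # Run the test body first, then roll the cluster config back to the
      # epoch recorded by CephManager.save_conf_epoch() (ctx.conf_epoch).
      yield
      cm = ctx.managers[config['cluster']]
      cm.ceph(f"config reset {ctx.conf_epoch}")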