qa/tasks/cephadm.py: create RBD pool

The ceph task already creates this pool, and a number of tests rely on
it being present. Creation can be disabled by setting create_rbd_pool to False.
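
For illustration only (this fragment is hypothetical, not part of the change):
the flag is read with config.get('create_rbd_pool', True), so creation stays on
by default and a job has to pass an explicit False to turn it off:

    # Hypothetical cephadm task config (a plain dict by the time it
    # reaches the task); all keys except the flag are elided.
    config = {
        'cluster': 'ceph',
        'create_rbd_pool': False,   # skip creating the 'rbd' pool
    }
    config.get('create_rbd_pool', True)   # -> False: pool creation is skipped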

Signed-off-by: Neha Ojha <nojha@redhat.com>
Neha Ojha 2021-01-05 17:38:52 +00:00
parent 20141ed67b
commit 276a4fb8b0


@@ -1017,6 +1017,29 @@ def crush_setup(ctx, config):
         args=['ceph', 'osd', 'crush', 'tunables', profile])
     yield
 
+@contextlib.contextmanager
+def create_rbd_pool(ctx, config):
+    cluster_name = config['cluster']
+    log.info('Waiting for OSDs to come up')
+    teuthology.wait_until_osds_up(
+        ctx,
+        cluster=ctx.cluster,
+        remote=ctx.ceph[cluster_name].bootstrap_remote,
+        ceph_cluster=cluster_name,
+    )
+    if config.get('create_rbd_pool', True):
+        log.info('Creating RBD pool')
+        _shell(ctx, cluster_name, ctx.ceph[cluster_name].bootstrap_remote,
+               args=['sudo', 'ceph', '--cluster', cluster_name,
+                     'osd', 'pool', 'create', 'rbd', '8'])
+        _shell(ctx, cluster_name, ctx.ceph[cluster_name].bootstrap_remote,
+               args=[
+                   'sudo', 'ceph', '--cluster', cluster_name,
+                   'osd', 'pool', 'application', 'enable',
+                   'rbd', 'rbd', '--yes-i-really-mean-it'
+               ])
+    yield
+
 @contextlib.contextmanager
 def _bypass():
     yield
@@ -1224,6 +1247,7 @@ def task(ctx, config):
             lambda: ceph_monitoring('alertmanager', ctx=ctx, config=config),
             lambda: ceph_monitoring('grafana', ctx=ctx, config=config),
             lambda: ceph_clients(ctx=ctx, config=config),
+            lambda: create_rbd_pool(ctx=ctx, config=config),
     ):
         ctx.managers[cluster_name] = CephManager(
             ctx.ceph[cluster_name].bootstrap_remote,
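
Purely as a hypothetical sketch (not part of the commit): a test that depends
on this pool could confirm it exists by reusing the _shell helper from this
file, since `ceph osd pool ls` prints the names of all pools:

    # Hypothetical check (not in the commit): list pools on the
    # bootstrap remote; 'rbd' should appear in the output once
    # create_rbd_pool has run with the flag left at its default.
    _shell(ctx, cluster_name, ctx.ceph[cluster_name].bootstrap_remote,
           args=['sudo', 'ceph', '--cluster', cluster_name,
                 'osd', 'pool', 'ls'])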