Merge pull request #39639 from sebastian-philipp/cephadm-deploy-daemon-units-for-existing-osds

mgr/cephadm: Activate existing OSDs

Reviewed-by: Michael Fritch <mfritch@suse.com>
Sebastian Wagner 2021-03-05 10:43:39 +01:00 committed by GitHub
commit 2844026c40
5 changed files with 68 additions and 10 deletions


@@ -217,3 +217,4 @@ Then, run bootstrap referencing this file::

  cephadm bootstrap -c /root/ceph.conf ...


@@ -668,3 +668,16 @@ It is also possible to specify directly device paths in specific hosts like the
This can easily be done with other filters, like `size` or `vendor` as well.

Activate existing OSDs
======================

If the operating system of a host has been reinstalled, the existing OSDs on
that host need to be activated again. For this use case, cephadm provides a
wrapper for :ref:`ceph-volume-lvm-activate` that activates all existing OSDs
on a host.

.. prompt:: bash #

   ceph cephadm osd activate <host>...

This will scan all existing disks on the given hosts for OSDs and deploy the
corresponding daemons.
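For illustration only (not part of this change): the new command can also be scripted. A minimal, hypothetical wrapper, assuming the `ceph` CLI is available and using the output format shown in the unit test added by this PR, could look like:

#!/usr/bin/env python3
# Hypothetical helper, not part of this PR: call the new
# `ceph cephadm osd activate` command for a set of reinstalled hosts.
import subprocess
import sys


def activate_osds(hosts):
    # The command accepts one or more host names in a single invocation.
    cmd = ['ceph', 'cephadm', 'osd', 'activate', *hosts]
    result = subprocess.run(cmd, check=True, capture_output=True, text=True)
    # Expected output per host, e.g.: "Created osd(s) 1 on host 'myhost'"
    return result.stdout


if __name__ == '__main__':
    print(activate_osds(sys.argv[1:]))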


@@ -1061,6 +1061,20 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
            return False
        return conf.last_modified > dt

    @orchestrator._cli_write_command(
        'cephadm osd activate'
    )
    def _osd_activate(self, host: List[str]) -> HandleCommandResult:
        """
        Start OSD containers for existing OSDs
        """

        @forall_hosts
        def run(h: str) -> str:
            return self.osd_service.deploy_osd_daemons_for_existing_osds(h, 'osd')

        return HandleCommandResult(stdout='\n'.join(run(host)))

    def _get_connection(self, host: str) -> Tuple['remoto.backends.BaseConnection',
                                                  'remoto.backends.LegacyModuleExecute']:
        """

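The handler relies on cephadm's `@forall_hosts` decorator to turn the per-host helper into one call per entry of the host list. The decorator itself is not part of this diff; a simplified stand-in, assuming it merely maps the wrapped function over the hosts and collects one result string per host (the real implementation may run the calls in parallel), is:

# Simplified stand-in for cephadm's @forall_hosts, used above by _osd_activate.
# Assumption: it maps the wrapped per-host function over a list of hosts and
# returns the collected results; the real decorator may parallelize the calls.
from functools import wraps
from typing import Callable, List, TypeVar

T = TypeVar('T')


def forall_hosts_sketch(f: Callable[[str], T]) -> Callable[[List[str]], List[T]]:
    @wraps(f)
    def wrapper(hosts: List[str]) -> List[T]:
        return [f(h) for h in hosts]  # one call, and one result, per host
    return wrapper


@forall_hosts_sketch
def run(h: str) -> str:
    # Placeholder for osd_service.deploy_osd_daemons_for_existing_osds(h, 'osd')
    return f"Created osd(s) on host '{h}'"


print('\n'.join(run(['host1', 'host2'])))  # mirrors '\n'.join(run(host)) above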

@@ -84,9 +84,17 @@ class OSDService(CephService):
            raise RuntimeError(
                'cephadm exited with an error code: %d, stderr:%s' % (
                    code, '\n'.join(err)))
        return self.deploy_osd_daemons_for_existing_osds(host, drive_group.service_name(),
                                                         replace_osd_ids)

    def deploy_osd_daemons_for_existing_osds(self, host: str, service_name: str,
                                             replace_osd_ids: Optional[List[str]] = None) -> str:

        if replace_osd_ids is None:
            replace_osd_ids = self.find_destroyed_osds().get(host, [])
            assert replace_osd_ids is not None

        # check result
        out, err, code = CephadmServe(self.mgr)._run_cephadm(
        osds_elems: dict = CephadmServe(self.mgr)._run_cephadm_json(
            host, 'osd', 'ceph-volume',
            [
                '--',
@@ -94,11 +102,6 @@ class OSDService(CephService):
                '--format', 'json',
            ])
        before_osd_uuid_map = self.mgr.get_osd_uuid_map(only_up=True)
        try:
            osds_elems = json.loads('\n'.join(out))
        except ValueError:
            logger.exception('Cannot decode JSON: \'%s\'' % '\n'.join(out))
            osds_elems = {}
        fsid = self.mgr._cluster_fsid
        osd_uuid_map = self.mgr.get_osd_uuid_map()
        created = []
@@ -122,7 +125,7 @@ class OSDService(CephService):
                created.append(osd_id)
                daemon_spec: CephadmDaemonDeploySpec = CephadmDaemonDeploySpec(
                    service_name=drive_group.service_name(),
                    service_name=service_name,
                    daemon_id=osd_id,
                    host=host,
                    daemon_type='osd',
@@ -716,7 +719,8 @@ class OSDRemovalQueue(object):
                if not osd.destroy():
                    raise orchestrator.OrchestratorError(
                        f"Could not destroy {osd}")
                logger.info(f"Successfully destroyed old {osd} on {osd.hostname}; ready for replacement")
                logger.info(
                    f"Successfully destroyed old {osd} on {osd.hostname}; ready for replacement")
            else:
                # purge from osdmap
                if not osd.purge():

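The hunks above swap the manual JSON decoding of `_run_cephadm` output for a `CephadmServe._run_cephadm_json` helper whose implementation is not shown in this diff. Based on the inline try/except it replaces, a sketch of roughly that shape (names and error handling are assumptions) would be:

# Hypothetical sketch of a JSON-decoding wrapper around CephadmServe._run_cephadm,
# inferred from the inline parsing removed above.  The real helper lives in
# cephadm/serve.py and may differ (for example, it may raise on decode errors
# instead of returning an empty dict).
import json
import logging
from typing import Any, List

logger = logging.getLogger(__name__)


def run_cephadm_json_sketch(serve: Any, host: str, entity: str,
                            command: str, args: List[str]) -> dict:
    out, err, code = serve._run_cephadm(host, entity, command, args)
    try:
        # cephadm returns stdout as a list of lines
        return json.loads('\n'.join(out))
    except ValueError:
        logger.exception('Cannot decode JSON: %r' % '\n'.join(out))
        return {}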

@@ -433,7 +433,7 @@ class TestCephadm(object):
                    '--no-auto', '/dev/sdb', '--yes', '--no-systemd'],
                env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=foo'], error_ok=True, stdin='{"config": "", "keyring": ""}')
            _run_cephadm.assert_called_with(
                'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'])
                'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'], image='', no_fsid=False)

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_apply_osd_save_non_collocated(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
@@ -473,7 +473,7 @@ class TestCephadm(object):
                env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=noncollocated'],
                error_ok=True, stdin='{"config": "", "keyring": ""}')
            _run_cephadm.assert_called_with(
                'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'])
                'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'], image='', no_fsid=False)

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
    @mock.patch("cephadm.module.SpecStore.save")
@@ -1088,3 +1088,29 @@ Traceback (most recent call last):
                      ['--', 'inventory', '--format=json'], image='',
                      no_fsid=False),
        ]

    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
    def test_osd_activate(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.return_value = ('{}', '', 0)
        with with_host(cephadm_module, 'test', refresh_hosts=False):
            cephadm_module.mock_store_set('_ceph_get', 'osd_map', {
                'osds': [
                    {
                        'osd': 1,
                        'up_from': 0,
                        'uuid': 'uuid'
                    }
                ]
            })

            ceph_volume_lvm_list = {
                '1': [{
                    'tags': {
                        'ceph.cluster_fsid': cephadm_module._cluster_fsid,
                        'ceph.osd_fsid': 'uuid'
                    }
                }]
            }
            _run_cephadm.return_value = (json.dumps(ceph_volume_lvm_list), '', 0)
            assert cephadm_module._osd_activate(
                ['test']).stdout == "Created osd(s) 1 on host 'test'"
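test_osd_activate feeds the handler a fake osdmap and a fake `ceph-volume lvm list` result and expects exactly one daemon to be created. The full matching loop in `deploy_osd_daemons_for_existing_osds` is only partly visible in this diff; a self-contained sketch of the selection it performs (field names taken from the test fixtures, other details assumed) is:

# Sketch of the selection logic that test_osd_activate exercises: given the
# ceph-volume `lvm list` JSON (keyed by OSD id) and the cluster/OSD uuid maps,
# pick the OSDs that belong to this cluster and are not already running.
# The real loop is only partially visible in this diff, so details here are
# assumptions, not the exact implementation.
from typing import Dict, List


def select_osds_to_activate(osds_elems: Dict[str, List[dict]],
                            cluster_fsid: str,
                            osd_uuid_map: Dict[str, str],
                            before_osd_uuid_map: Dict[str, str]) -> List[str]:
    created = []
    for osd_id, lvs in osds_elems.items():
        for lv in lvs:
            tags = lv.get('tags', {})
            if tags.get('ceph.cluster_fsid') != cluster_fsid:
                continue  # LV belongs to a different cluster
            if osd_id in before_osd_uuid_map:
                continue  # a daemon for this OSD is already up
            if osd_uuid_map.get(osd_id) != tags.get('ceph.osd_fsid'):
                continue  # uuid does not match the cluster's osdmap
            created.append(osd_id)
    return created


# With fixtures shaped like the ones in test_osd_activate, this yields ['1'],
# matching the "Created osd(s) 1 on host 'test'" result asserted above.
assert select_osds_to_activate(
    {'1': [{'tags': {'ceph.cluster_fsid': 'fsid', 'ceph.osd_fsid': 'uuid'}}]},
    cluster_fsid='fsid',
    osd_uuid_map={'1': 'uuid'},
    before_osd_uuid_map={},
) == ['1']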