Mirror of https://github.com/ceph/ceph
Merge pull request #39639 from sebastian-philipp/cephadm-deploy-daemon-units-for-existing-osds

mgr/cephadm: Activate existing OSDs

Reviewed-by: Michael Fritch <mfritch@suse.com>

Commit 2844026c40
@@ -217,3 +217,4 @@ Then, run bootstrap referencing this file::
 
   cephadm bootstrap -c /root/ceph.conf ...
 
@@ -668,3 +668,16 @@ It is also possible to specify directly device paths in specific hosts like the
 
 This can easily be done with other filters, like `size` or `vendor` as well.
 
+Activate existing OSDs
+======================
+
+In case the OS of a host was reinstalled, existing OSDs need to be activated
+again. For this use case, cephadm provides a wrapper for :ref:`ceph-volume-lvm-activate` that
+activates all existing OSDs on a host.
+
+.. prompt:: bash #
+
+  ceph cephadm osd activate <host>...
+
+This will scan all existing disks for OSDs and deploy corresponding daemons.
+
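The documentation sentence above ("scan all existing disks for OSDs and deploy corresponding daemons") is implemented in the hunks further down by matching the output of `ceph-volume lvm list` against the cluster. As a standalone illustration of that matching step only, here is a minimal Python sketch; `osds_to_activate` is a hypothetical helper, not the mgr code itself, and it assumes the JSON shape used in the unit test at the end of this diff (per-OSD lists of LVs carrying `ceph.cluster_fsid` and `ceph.osd_fsid` tags).

import json
from typing import Dict

def osds_to_activate(lvm_list_json: str, cluster_fsid: str) -> Dict[str, str]:
    """Return {osd_id: osd_fsid} for LVs whose tags match this cluster."""
    result: Dict[str, str] = {}
    for osd_id, lvs in json.loads(lvm_list_json).items():
        for lv in lvs:
            tags = lv.get('tags', {})
            if tags.get('ceph.cluster_fsid') != cluster_fsid:
                continue  # LV belongs to a different cluster; ignore it
            result[osd_id] = tags['ceph.osd_fsid']
    return result

# Input shaped like the test fixture at the end of this diff:
lvm_list = {'1': [{'tags': {'ceph.cluster_fsid': 'fsid-a', 'ceph.osd_fsid': 'uuid'}}]}
assert osds_to_activate(json.dumps(lvm_list), 'fsid-a') == {'1': 'uuid'}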
@@ -1061,6 +1061,20 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
             return False
         return conf.last_modified > dt
 
+    @orchestrator._cli_write_command(
+        'cephadm osd activate'
+    )
+    def _osd_activate(self, host: List[str]) -> HandleCommandResult:
+        """
+        Start OSD containers for existing OSDs
+        """
+
+        @forall_hosts
+        def run(h: str) -> str:
+            return self.osd_service.deploy_osd_daemons_for_existing_osds(h, 'osd')
+
+        return HandleCommandResult(stdout='\n'.join(run(host)))
+
     def _get_connection(self, host: str) -> Tuple['remoto.backends.BaseConnection',
                                                   'remoto.backends.LegacyModuleExecute']:
         """
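The new command handler delegates the per-host work to deploy_osd_daemons_for_existing_osds and uses the orchestrator's forall_hosts decorator to apply run() to every host given on the command line and collect the results. The sketch below only illustrates that call pattern; it is a toy stand-in, not the real decorator from the orchestrator module, which may for instance execute the hosts in parallel.

from typing import Callable, List, TypeVar

T = TypeVar('T')

def forall_hosts_sketch(f: Callable[[str], T]) -> Callable[[List[str]], List[T]]:
    """Toy stand-in for @forall_hosts: call f once per host, collect results."""
    def wrapper(hosts: List[str]) -> List[T]:
        return [f(h) for h in hosts]
    return wrapper

@forall_hosts_sketch
def run(h: str) -> str:
    # stand-in for osd_service.deploy_osd_daemons_for_existing_osds(h, 'osd')
    return f"Created osd(s) on host '{h}'"

print('\n'.join(run(['host1', 'host2'])))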
@@ -84,9 +84,17 @@ class OSDService(CephService):
                 raise RuntimeError(
                     'cephadm exited with an error code: %d, stderr:%s' % (
                         code, '\n'.join(err)))
+        return self.deploy_osd_daemons_for_existing_osds(host, drive_group.service_name(),
+                                                         replace_osd_ids)
+
+    def deploy_osd_daemons_for_existing_osds(self, host: str, service_name: str,
+                                             replace_osd_ids: Optional[List[str]] = None) -> str:
+
+        if replace_osd_ids is None:
+            replace_osd_ids = self.find_destroyed_osds().get(host, [])
+            assert replace_osd_ids is not None
         # check result
-        out, err, code = CephadmServe(self.mgr)._run_cephadm(
+        osds_elems: dict = CephadmServe(self.mgr)._run_cephadm_json(
             host, 'osd', 'ceph-volume',
             [
                 '--',
@@ -94,11 +102,6 @@ class OSDService(CephService):
                 '--format', 'json',
             ])
         before_osd_uuid_map = self.mgr.get_osd_uuid_map(only_up=True)
-        try:
-            osds_elems = json.loads('\n'.join(out))
-        except ValueError:
-            logger.exception('Cannot decode JSON: \'%s\'' % '\n'.join(out))
-            osds_elems = {}
         fsid = self.mgr._cluster_fsid
         osd_uuid_map = self.mgr.get_osd_uuid_map()
         created = []
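The two hunks above drop the inline json.loads/ValueError handling in favour of a single _run_cephadm_json call on CephadmServe, presumably a helper that runs cephadm and decodes its stdout as JSON in one place. The following Python sketch only reflects the behaviour the removed code suggests; the function name, signature, and the exception it raises are assumptions, not the actual CephadmServe API.

import json
import logging
from typing import Any, Callable, List, Tuple

logger = logging.getLogger(__name__)

# run_cephadm stands in for CephadmServe._run_cephadm and is assumed to
# return (stdout_lines, stderr_lines, exit_code).
RunCephadm = Callable[..., Tuple[List[str], List[str], int]]

def run_cephadm_json(run_cephadm: RunCephadm, host: str, entity: str,
                     command: str, args: List[str]) -> Any:
    """Hypothetical wrapper: run cephadm and parse its output as JSON."""
    out, err, code = run_cephadm(host, entity, command, args)
    if code:
        raise RuntimeError('cephadm exited with an error code: %d, stderr:%s'
                           % (code, '\n'.join(err)))
    try:
        return json.loads('\n'.join(out))
    except ValueError:
        logger.exception("Cannot decode JSON: %r", '\n'.join(out))
        raise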
@@ -122,7 +125,7 @@ class OSDService(CephService):
 
                 created.append(osd_id)
                 daemon_spec: CephadmDaemonDeploySpec = CephadmDaemonDeploySpec(
-                    service_name=drive_group.service_name(),
+                    service_name=service_name,
                     daemon_id=osd_id,
                     host=host,
                     daemon_type='osd',
@@ -716,7 +719,8 @@ class OSDRemovalQueue(object):
             if not osd.destroy():
                 raise orchestrator.OrchestratorError(
                     f"Could not destroy {osd}")
-            logger.info(f"Successfully destroyed old {osd} on {osd.hostname}; ready for replacement")
+            logger.info(
+                f"Successfully destroyed old {osd} on {osd.hostname}; ready for replacement")
         else:
             # purge from osdmap
             if not osd.purge():
@@ -433,7 +433,7 @@ class TestCephadm(object):
                 '--no-auto', '/dev/sdb', '--yes', '--no-systemd'],
             env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=foo'], error_ok=True, stdin='{"config": "", "keyring": ""}')
         _run_cephadm.assert_called_with(
-            'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'])
+            'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'], image='', no_fsid=False)
 
     @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
     def test_apply_osd_save_non_collocated(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
@@ -473,7 +473,7 @@ class TestCephadm(object):
             env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=noncollocated'],
             error_ok=True, stdin='{"config": "", "keyring": ""}')
         _run_cephadm.assert_called_with(
-            'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'])
+            'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'], image='', no_fsid=False)
 
     @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
     @mock.patch("cephadm.module.SpecStore.save")
@@ -1088,3 +1088,29 @@ Traceback (most recent call last):
                 ['--', 'inventory', '--format=json'], image='',
                 no_fsid=False),
         ]
+
+    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
+    def test_osd_activate(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
+        _run_cephadm.return_value = ('{}', '', 0)
+        with with_host(cephadm_module, 'test', refresh_hosts=False):
+            cephadm_module.mock_store_set('_ceph_get', 'osd_map', {
+                'osds': [
+                    {
+                        'osd': 1,
+                        'up_from': 0,
+                        'uuid': 'uuid'
+                    }
+                ]
+            })
+
+            ceph_volume_lvm_list = {
+                '1': [{
+                    'tags': {
+                        'ceph.cluster_fsid': cephadm_module._cluster_fsid,
+                        'ceph.osd_fsid': 'uuid'
+                    }
+                }]
+            }
+            _run_cephadm.return_value = (json.dumps(ceph_volume_lvm_list), '', 0)
+            assert cephadm_module._osd_activate(
+                ['test']).stdout == "Created osd(s) 1 on host 'test'"