mgr/cephadm: streamline rgw deployment

cephadm will create realm, zonegroup, and zone if needed before creating rgw service

Fixes: https://tracker.ceph.com/issues/43681
Signed-off-by: Daniel-Pivonka <dpivonka@redhat.com>
Daniel-Pivonka 2020-07-16 08:24:47 -04:00
parent 4ba83be982
commit 131001c453
6 changed files with 147 additions and 27 deletions


@@ -382,18 +382,6 @@ that configuration isn't already in place (usually in the
daemons will start up with default settings (e.g., binding to port
80).
If a realm has not been created yet, first create a realm::
# radosgw-admin realm create --rgw-realm=<realm-name> --default
Next create a new zonegroup::
# radosgw-admin zonegroup create --rgw-zonegroup=<zonegroup-name> --master --default
Next create a zone::
# radosgw-admin zone create --rgw-zonegroup=<zonegroup-name> --rgw-zone=<zone-name> --master --default
To deploy a set of radosgw daemons for a particular realm and zone::
# ceph orch apply rgw *<realm-name>* *<zone-name>* --placement="*<num-daemons>* [*<host1>* ...]"
@@ -401,11 +389,18 @@ To deploy a set of radosgw daemons for a particular realm and zone::
For example, to deploy 2 rgw daemons serving the *myorg* realm and the *us-east-1*
zone on *myhost1* and *myhost2*::
# radosgw-admin realm create --rgw-realm=myorg --default
# radosgw-admin zonegroup create --rgw-zonegroup=default --master --default
# radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=us-east-1 --master --default
# ceph orch apply rgw myorg us-east-1 --placement="2 myhost1 myhost2"
Cephadm will wait for a healthy cluster and, before deploying the rgw daemon(s), will automatically create the supplied realm and zone if they do not already exist.
Alternatively, the realm, zonegroup, and zone can be manually created using ``radosgw-admin`` commands::
# radosgw-admin realm create --rgw-realm=<realm-name> --default
# radosgw-admin zonegroup create --rgw-zonegroup=<zonegroup-name> --master --default
# radosgw-admin zone create --rgw-zonegroup=<zonegroup-name> --rgw-zone=<zone-name> --master --default
See :ref:`orchestrator-cli-placement-spec` for details of the placement specification.
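As an optional check (a suggestion only, not something this change or the orchestrator requires), the same ``radosgw-admin`` list commands that cephadm runs internally can be invoked by hand to confirm the realm, zonegroup, and zone exist::
# radosgw-admin realm list --format=json
# radosgw-admin zonegroup list --format=json
# radosgw-admin zone list --format=json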
Deploying NFS ganesha


@@ -22,7 +22,7 @@ import subprocess
from ceph.deployment import inventory
from ceph.deployment.drive_group import DriveGroupSpec
from ceph.deployment.service_spec import \
NFSServiceSpec, ServiceSpec, PlacementSpec, assert_valid_host
NFSServiceSpec, RGWSpec, ServiceSpec, PlacementSpec, assert_valid_host
from cephadm.services.cephadmservice import CephadmDaemonSpec
from mgr_module import MgrModule, HandleCommandResult
@@ -1979,12 +1979,18 @@ you may want to run:
self.log.debug('Hosts that will lose daemons: %s' % remove_daemon_hosts)
for host, network, name in add_daemon_hosts:
if not did_config and config_func:
config_func(spec)
did_config = True
daemon_id = self.get_unique_name(daemon_type, host, daemons,
prefix=spec.service_id,
forcename=name)
if not did_config and config_func:
if daemon_type == 'rgw':
rgw_config_func = cast(Callable[[RGWSpec, str], None], config_func)
rgw_config_func(cast(RGWSpec, spec), daemon_id)
else:
config_func(spec)
did_config = True
daemon_spec = self.cephadm_services[daemon_type].make_daemon_spec(host, daemon_id, network, spec)
self.log.debug('Placing %s.%s on host %s' % (
daemon_type, daemon_id, host))
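The config call moves below ``get_unique_name`` because the rgw config hook now needs the daemon id: the realm/zonegroup/zone bootstrap runs ``radosgw-admin`` with that daemon's keyring. A minimal standalone sketch of the dispatch (``call_config_func`` and ``ConfigFunc`` are hypothetical names; ``Any`` stands in for the spec types only so the sketch runs on its own):

from typing import Any, Callable, Union, cast

# Hypothetical, simplified mirror of the dispatch in the hunk above.
ConfigFunc = Union[Callable[[Any], None], Callable[[Any, str], None]]

def call_config_func(daemon_type: str, config_func: ConfigFunc,
                     spec: Any, daemon_id: str) -> None:
    if daemon_type == 'rgw':
        # RgwService.config() now also takes the daemon id, because it has to
        # create the realm/zonegroup/zone using that daemon's keyring.
        cast(Callable[[Any, str], None], config_func)(spec, daemon_id)
    else:
        cast(Callable[[Any], None], config_func)(spec)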
@@ -2119,14 +2125,21 @@ you may want to run:
raise OrchestratorError('too few hosts: want %d, have %s' % (
count, hosts))
if config_func:
config_func(spec)
did_config = False
args = [] # type: List[CephadmDaemonSpec]
for host, network, name in hosts:
daemon_id = self.get_unique_name(daemon_type, host, daemons,
prefix=spec.service_id,
forcename=name)
if not did_config and config_func:
if daemon_type == 'rgw':
config_func(spec, daemon_id)
else:
config_func(spec)
did_config = True
daemon_spec = self.cephadm_services[daemon_type].make_daemon_spec(host, daemon_id, network, spec)
self.log.debug('Placing %s.%s on host %s' % (
daemon_type, daemon_id, host))


@@ -1,5 +1,6 @@
import json
import logging
import subprocess
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, List, Callable, Any, TypeVar, Generic, Optional, Dict, Tuple
@@ -353,9 +354,13 @@ class MdsService(CephadmService):
class RgwService(CephadmService):
TYPE = 'rgw'
def config(self, spec: RGWSpec) -> None:
def config(self, spec: RGWSpec, rgw_id: str) -> None:
assert self.TYPE == spec.service_type
# create realm, zonegroup, and zone if needed
self.create_realm_zonegroup_zone(spec, rgw_id)
# ensure rgw_realm and rgw_zone are set for these daemons
ret, out, err = self.mgr.check_mon_command({
'prefix': 'config set',
@@ -414,6 +419,13 @@
assert self.TYPE == daemon_spec.daemon_type
rgw_id, host = daemon_spec.daemon_id, daemon_spec.host
keyring = self.get_keyring(rgw_id)
daemon_spec.keyring = keyring
return self.mgr._create_daemon(daemon_spec)
def get_keyring(self, rgw_id: str):
ret, keyring, err = self.mgr.check_mon_command({
'prefix': 'auth get-or-create',
'entity': f"{utils.name_to_config_section('rgw')}.{rgw_id}",
@@ -421,10 +433,94 @@
'mgr', 'allow rw',
'osd', 'allow rwx'],
})
return keyring
daemon_spec.keyring = keyring
def create_realm_zonegroup_zone(self, spec: RGWSpec, rgw_id: str):
if utils.get_cluster_health(self.mgr) != 'HEALTH_OK':
raise OrchestratorError('Health not ok, will try again when health ok')
return self.mgr._create_daemon(daemon_spec)
# get the keyring needed to run radosgw-admin commands, and strip out just the secret key
keyring = self.get_keyring(rgw_id).split('key = ', 1)[1].rstrip()
# We can call radosgw-admin within the container, because cephadm gives the MGR the required keyring permissions
# get realms
cmd = ['radosgw-admin',
'--key=%s'%keyring,
'--user', 'rgw.%s'%rgw_id,
'realm', 'list',
'--format=json']
result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# create realm if needed
cmd = ['radosgw-admin',
'--key=%s'%keyring,
'--user', 'rgw.%s'%rgw_id,
'realm', 'create',
'--rgw-realm=%s'%spec.rgw_realm,
'--default']
if not result.stdout:
result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.mgr.log.info('created realm: %s'%spec.rgw_realm)
else:
try:
j = json.loads(result.stdout)
if 'realms' not in j or spec.rgw_realm not in j['realms']:
result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.mgr.log.info('created realm: %s'%spec.rgw_realm)
except Exception as e:
raise OrchestratorError('failed to parse realm info')
# get zonegroup
cmd = ['radosgw-admin',
'--key=%s'%keyring,
'--user', 'rgw.%s'%rgw_id,
'zonegroup', 'list',
'--format=json']
result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# create zonegroup if needed
cmd = ['radosgw-admin',
'--key=%s'%keyring,
'--user', 'rgw.%s'%rgw_id,
'zonegroup', 'create',
'--rgw-zonegroup=default',
'--master', '--default']
if not result.stdout:
result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.mgr.log.info('created zonegroup: default')
else:
try:
j = json.loads(result.stdout)
if 'zonegroups' not in j or 'default' not in j['zonegroups']:
result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.mgr.log.info('created zonegroup: default')
except Exception as e:
raise OrchestratorError('failed to parse zonegroup info')
# get zones
cmd = ['radosgw-admin',
'--key=%s'%keyring,
'--user', 'rgw.%s'%rgw_id,
'zone', 'list',
'--format=json']
result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# create zone if needed
cmd = ['radosgw-admin',
'--key=%s'%keyring,
'--user', 'rgw.%s'%rgw_id,
'zone', 'create',
'--rgw-zonegroup=default',
'--rgw-zone=%s'%spec.rgw_zone,
'--master', '--default']
if not result.stdout:
result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.mgr.log.info('created zone: %s'%spec.rgw_zone)
else:
try:
j = json.loads(result.stdout)
if 'zones' not in j or spec.rgw_zone not in j['zones']:
result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.mgr.log.info('created zone: %s'%spec.rgw_zone)
except Exception as e:
raise OrchestratorError('failed to parse zone info')
class RbdMirrorService(CephadmService):
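The three blocks above repeat the same list-then-create pattern for realm, zonegroup, and zone. Purely as an illustration (``_ensure_rgw_entity`` is a hypothetical helper, not part of this change), the pattern could be collapsed into a single function that reuses the exact radosgw-admin invocations shown above:

import json
import subprocess
from typing import List

from orchestrator import OrchestratorError  # same exception type used above

def _ensure_rgw_entity(base_cmd: List[str], kind: str, list_key: str,
                       name: str, create_args: List[str], log) -> None:
    # list existing entities (realms/zonegroups/zones) as JSON
    result = subprocess.run(base_cmd + [kind, 'list', '--format=json'],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    existing: List[str] = []
    if result.stdout:
        try:
            existing = json.loads(result.stdout).get(list_key, [])
        except Exception:
            raise OrchestratorError('failed to parse %s info' % kind)
    # create the entity only if it is not already present
    if name not in existing:
        subprocess.run(base_cmd + [kind, 'create'] + create_args,
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        log.info('created %s: %s' % (kind, name))

The realm step, for example, would then reduce to something like:

base_cmd = ['radosgw-admin', '--key=%s' % keyring, '--user', 'rgw.%s' % rgw_id]
_ensure_rgw_entity(base_cmd, 'realm', 'realms', spec.rgw_realm,
                   ['--rgw-realm=%s' % spec.rgw_realm, '--default'], self.mgr.log)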


@@ -377,6 +377,7 @@ class TestCephadm(object):
assert out == set()
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
@mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _,__,___: None)
def test_rgw_update(self, cephadm_module):
with with_host(cephadm_module, 'host1'):
with with_host(cephadm_module, 'host2'):
@@ -426,6 +427,7 @@
]
)
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
@mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _,__,___: None)
def test_daemon_add(self, spec: ServiceSpec, meth, cephadm_module):
with with_host(cephadm_module, 'test'):
spec.placement = PlacementSpec(hosts=['test'], count=1)


@@ -7,6 +7,7 @@ from orchestrator import ServiceDescription
from tests import mock
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
@mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _,__,___: None)
def test_service_ls(cephadm_module: CephadmOrchestrator):
with with_host(cephadm_module, 'host1'):
with with_host(cephadm_module, 'host2'):


@@ -1,7 +1,8 @@
import logging
import re
import json
from functools import wraps
from typing import Optional, Callable, TypeVar, List
from orchestrator import OrchestratorError
@@ -43,7 +44,6 @@ def name_to_auth_entity(daemon_type, # type: str
else:
raise OrchestratorError("unknown auth entity name")
def forall_hosts(f: Callable[..., T]) -> Callable[..., List[T]]:
@wraps(f)
def forall_hosts_wrapper(*args) -> List[T]:
@@ -73,4 +73,17 @@ def forall_hosts(f: Callable[..., T]) -> Callable[..., List[T]]:
return CephadmOrchestrator.instance._worker_pool.map(do_work, vals)
return forall_hosts_wrapper
return forall_hosts_wrapper
def get_cluster_health(mgr):
# check cluster health
ret, out, err = mgr.check_mon_command({
'prefix': 'health',
'format': 'json',
})
try:
j = json.loads(out)
except Exception as e:
raise OrchestratorError('failed to parse health status')
return j['status']
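For reference, the mon ``health`` command issued here with JSON formatting returns an object whose ``status`` field is ``HEALTH_OK``, ``HEALTH_WARN``, or ``HEALTH_ERR``; the helper simply surfaces that field for ``create_realm_zonegroup_zone``. A tiny standalone illustration of the parsing, with a made-up payload (a real reply carries more keys):

import json

out = '{"status": "HEALTH_OK", "checks": {}}'  # illustrative payload only
assert json.loads(out)['status'] == 'HEALTH_OK'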