Merge PR #43287 into master

* refs/pull/43287/head:
	mgr/rook, qa/tasks/rook: change rgw daemon service name
	mgr/rook: fix placement_spec_to_node_selector
	mgr/rook: orch rm no longer uses rook api delete
	qa/tasks/rook: fix cluster deletion hanging due to CephObjectStore CR
	mgr/rook: use default replication size in orch apply rgw
	mgr/rook: add placement specs to apply rgw

Reviewed-by: Sage Weil <sage@redhat.com>
Sage Weil 2021-09-29 14:38:47 -04:00
commit e3bede0008
4 changed files with 58 additions and 29 deletions
qa/suites/orch/rook/smoke
qa/tasks
src/pybind/mgr/rook


@@ -6,3 +6,4 @@ tasks:
 - ceph orch ls
 - ceph orch host ls
 - ceph orch device ls
+- ceph orch apply rgw foo


@@ -646,4 +646,21 @@ def task(ctx, config):
             yield
         finally:
+            to_remove = []
+            ret = _shell(ctx, config, ['ceph', 'orch', 'ls', '-f', 'json'], stdout=BytesIO())
+            if ret.exitstatus == 0:
+                r = json.loads(ret.stdout.getvalue().decode('utf-8'))
+                for service in r:
+                    if service['service_type'] in ['rgw', 'mds', 'nfs']:
+                        _shell(ctx, config, ['ceph', 'orch', 'rm', service['service_name']])
+                        to_remove.append(service['service_name'])
+                with safe_while(sleep=10, tries=90, action="waiting for service removal") as proceed:
+                    while proceed():
+                        ret = _shell(ctx, config, ['ceph', 'orch', 'ls', '-f', 'json'], stdout=BytesIO())
+                        if ret.exitstatus == 0:
+                            r = json.loads(ret.stdout.getvalue().decode('utf-8'))
+                            still_up = [service['service_name'] for service in r]
+                            matches = set(still_up).intersection(to_remove)
+                            if not matches:
+                                break
             log.info('Tearing down rook')
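
Note: the removal wait above leans on teuthology's safe_while helper, which bounds the poll loop (90 tries, 10 s apart, roughly 15 minutes). A minimal standalone sketch of that pattern, with a hypothetical stand-in check in place of re-running 'ceph orch ls -f json':

    # sketch only: services_all_gone() is a hypothetical stand-in for listing
    # 'ceph orch ls -f json' again and diffing service names against to_remove
    from teuthology.contextutil import safe_while

    def wait_for_removal(services_all_gone):
        # safe_while raises MaxWhileTries if the check never passes
        with safe_while(sleep=10, tries=90, action="waiting for service removal") as proceed:
            while proceed():
                if services_all_gone():
                    break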


@@ -274,10 +274,8 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
         if service_type == 'mds' or service_type is None:
             # CephFilesystems
-            all_fs = self.rook_cluster.rook_api_get(
-                "cephfilesystems/")
-            self.log.debug('CephFilesystems %s' % all_fs)
-            for fs in all_fs.get('items', []):
+            all_fs = self.rook_cluster.get_resource("cephfilesystems")
+            for fs in all_fs:
                 svc = 'mds.' + fs['metadata']['name']
                 if svc in spec:
                     continue
@@ -299,13 +297,11 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
         if service_type == 'rgw' or service_type is None:
             # CephObjectstores
-            all_zones = self.rook_cluster.rook_api_get(
-                "cephobjectstores/")
-            self.log.debug('CephObjectstores %s' % all_zones)
-            for zone in all_zones.get('items', []):
+            all_zones = self.rook_cluster.get_resource("cephobjectstores")
+            for zone in all_zones:
                 rgw_realm = zone['metadata']['name']
                 rgw_zone = rgw_realm
-                svc = 'rgw.' + rgw_realm + '.' + rgw_zone
+                svc = 'rgw.' + rgw_realm
                 if svc in spec:
                     continue
                 active = zone['spec']['gateway']['instances'];
@@ -317,7 +313,7 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
                 port = zone['spec']['gateway']['port'] or 80
                 spec[svc] = orchestrator.ServiceDescription(
                     spec=RGWSpec(
-                        service_id=rgw_realm + '.' + rgw_zone,
+                        service_id=zone['metadata']['name'],
                         rgw_realm=rgw_realm,
                         rgw_zone=rgw_zone,
                         ssl=ssl,
@@ -331,10 +327,8 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
         if service_type == 'nfs' or service_type is None:
             # CephNFSes
-            all_nfs = self.rook_cluster.rook_api_get(
-                "cephnfses/")
-            self.log.warning('CephNFS %s' % all_nfs)
-            for nfs in all_nfs.get('items', []):
+            all_nfs = self.rook_cluster.get_resource("cephnfses")
+            for nfs in all_nfs:
                 nfs_name = nfs['metadata']['name']
                 svc = 'nfs.' + nfs_name
                 if svc in spec:
@@ -450,7 +444,9 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
     @handle_orch_error
     def apply_rgw(self, spec):
         # type: (RGWSpec) -> str
-        return self.rook_cluster.apply_objectstore(spec)
+        num_of_osds = self.get_ceph_option('osd_pool_default_size')
+        assert type(num_of_osds) is int
+        return self.rook_cluster.apply_objectstore(spec, num_of_osds)
 
     @handle_orch_error
     def apply_nfs(self, spec):
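
Note: the replica count handed to apply_objectstore comes straight from the cluster's osd_pool_default_size option. A rough standalone sketch of that lookup; the helper name and the fallback of 3 are illustrative, not part of the change:

    # sketch only: mgr is a MgrModule-like object exposing get_ceph_option()
    def default_pool_size(mgr) -> int:
        size = mgr.get_ceph_option('osd_pool_default_size')
        # apply_objectstore(spec, num_replicas) expects an int
        return size if isinstance(size, int) else 3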


@@ -894,7 +894,7 @@ class RookCluster(object):
             self.coreV1_api.patch_node(host, matching_node)
         return OrchResult(f'Removed {label} label from {host}')
 
-    def apply_objectstore(self, spec: RGWSpec) -> str:
+    def apply_objectstore(self, spec: RGWSpec, num_replicas: int) -> str:
         assert spec.service_id is not None
 
         name = spec.service_id
@@ -907,7 +907,7 @@ class RookCluster(object):
             # translate . to - (fingers crossed!) instead.
             name = spec.service_id.replace('.', '-')
 
+        all_hosts = self.get_hosts()
         def _create_zone() -> cos.CephObjectStore:
             port = None
             secure_port = None
@@ -926,6 +926,27 @@ class RookCluster(object):
                         port=port,
                         securePort=secure_port,
                         instances=spec.placement.count or 1,
+                        placement=cos.Placement(
+                            cos.NodeAffinity(
+                                requiredDuringSchedulingIgnoredDuringExecution=cos.RequiredDuringSchedulingIgnoredDuringExecution(
+                                    nodeSelectorTerms=cos.NodeSelectorTermsList(
+                                        [
+                                            placement_spec_to_node_selector(spec.placement, all_hosts)
+                                        ]
+                                    )
+                                )
+                            )
+                        )
+                    ),
+                    dataPool=cos.DataPool(
+                        replicated=cos.Replicated(
+                            size=num_replicas
+                        )
+                    ),
+                    metadataPool=cos.MetadataPool(
+                        replicated=cos.Replicated(
+                            size=num_replicas
+                        )
+                    )
                 )
             )
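
Note: expressed as the plain custom object the Kubernetes API ends up receiving, the store built above corresponds roughly to the dict below. The name 'foo' and size 3 are illustrative values only (foo is the store the smoke test applies; 3 is a common osd_pool_default_size), and the nodeSelectorTerms entry is whatever placement_spec_to_node_selector() produces:

    ceph_object_store = {
        'apiVersion': 'ceph.rook.io/v1',
        'kind': 'CephObjectStore',
        'metadata': {'name': 'foo', 'namespace': 'rook-ceph'},
        'spec': {
            'gateway': {
                'port': 80,
                'instances': 1,
                'placement': {'nodeAffinity': {
                    'requiredDuringSchedulingIgnoredDuringExecution': {
                        'nodeSelectorTerms': [],  # from placement_spec_to_node_selector()
                    }}},
            },
            'dataPool': {'replicated': {'size': 3}},
            'metadataPool': {'replicated': {'size': 3}},
        },
    }
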
@@ -986,20 +1007,14 @@ class RookCluster(object):
                             _update_nfs, _create_nfs)
 
     def rm_service(self, rooktype: str, service_id: str) -> str:
-
+        self.customObjects_api.delete_namespaced_custom_object(group="ceph.rook.io", version="v1", namespace="rook-ceph", plural=rooktype, name=service_id)
         objpath = "{0}/{1}".format(rooktype, service_id)
-
-        try:
-            self.rook_api_delete(objpath)
-        except ApiException as e:
-            if e.status == 404:
-                log.info("{0} service '{1}' does not exist".format(rooktype, service_id))
-                # Idempotent, succeed.
-            else:
-                raise
-
         return f'Removed {objpath}'
 
+    def get_resource(self, resource_type: str) -> Iterable:
+        custom_objects: KubernetesCustomResource = KubernetesCustomResource(self.customObjects_api.list_namespaced_custom_object, group="ceph.rook.io", version="v1", namespace="rook-ceph", plural=resource_type)
+        return custom_objects.items
+
     def can_create_osd(self) -> bool:
         current_cluster = self.rook_api_get(
             "cephclusters/{0}".format(self.rook_env.cluster_name))
@@ -1286,7 +1301,7 @@ def placement_spec_to_node_selector(spec: PlacementSpec, all_hosts: List) -> ccl
                 values=ccl.CrdObjectList(host_list)
             )
         )
-    if spec.host_pattern == "*":
+    if spec.host_pattern == "*" or (not spec.label and not spec.hosts and not spec.host_pattern):
         res.matchExpressions.append(
             ccl.MatchExpressionsItem(
                 key="kubernetes.io/hostname",
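
Note: the widened condition means an empty placement spec now takes the same branch as host_pattern == "*", so an RGW applied without any explicit placement still gets scheduled. Condensed as a predicate (the helper name is illustrative only):

    def schedule_on_any_node(spec) -> bool:
        # spec is a PlacementSpec as used above
        explicit = bool(spec.label or spec.hosts or spec.host_pattern)
        return spec.host_pattern == "*" or not explicit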