Merge PR #34061 into master

* refs/pull/34061/head:
	mgr/orch: Add `ceph orch ls --export`
	mgr/dashboard: adapt to new `ServiceDescription.to_json()`
	python-common: add `service_name` to `ServiceSpec.to_json`
	python-common: make ServiceSpec and ServiceDescription compatible
	src/ceph.in: add yaml to known formats
	mgr/orch: add yaml to `orch ls`
	mgr/orch: remove `orch spec dump`
	python-common: reorder RGWSpec arguments
	python-common: prevent ServiceSpec of wrong type
	pybind/mgr: tox.ini: omit cov report
	mgr/cephadm: test describe_service
	mgr/orch: ServiceDescription: change json representation
	mgr/orch: ServiceDescription: Make spec a requirement

Reviewed-by: Kiefer Chang <kiefer.chang@suse.com>
Reviewed-by: Sage Weil <sage@redhat.com>
Committed by: Sage Weil, 2020-03-29 07:17:37 -05:00
commit 48b6cc6182
15 changed files with 275 additions and 139 deletions


@@ -206,18 +206,29 @@ services of a particular type via optional --type parameter
::
ceph orch ps
ceph orch service ls [--host host] [--svc_type type] [--refresh]
ceph orch ls [--service_type type] [--service_name name] [--export] [--format f] [--refresh]
Discover the status of a particular service or daemons::
ceph orch service ls --svc_type type --svc_id <name> [--refresh]
ceph orch ls --service_type type --service_name <name> [--refresh]
Export the service specs known to the orchestrator as yaml, in a format
that is compatible with ``ceph orch apply -i``::
ceph orch ls --export
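For example, an exported mds spec might look like this (illustrative
output; the exact fields depend on the deployed services)::

    service_type: mds
    service_id: fsname
    service_name: mds.fsname
    placement:
      hosts:
      - hostname: host1
        name: ''
        network: ''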
Daemon Status
=============
Print a list of all daemons known to the orchestrator::
ceph orch ps [--hostname host] [--daemon_type type] [--service_name name] [--daemon_id id] [--format f] [--refresh]
Query the status of a particular service instance (mon, osd, mds, rgw). For OSDs
the id is the numeric OSD ID; for MDS services it is the file system name::
ceph orch daemon status <type> <instance-name> [--refresh]
ceph orch ps --daemon_type osd --daemon_id 0
.. _orchestrator-cli-cephfs:


@@ -333,7 +333,7 @@ def parse_cmdargs(args=None, target=''):
help="make less verbose")
parser.add_argument('-f', '--format', choices=['json', 'json-pretty',
'xml', 'xml-pretty', 'plain'], dest='output_format')
'xml', 'xml-pretty', 'plain', 'yaml'], dest='output_format')
parser.add_argument('--connect-timeout', dest='cluster_timeout',
type=int,
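With ``yaml`` among the accepted choices, any command that honors ``--format``
can emit YAML as well, for example (illustrative)::

    ceph orch ls --format yaml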


@@ -1816,18 +1816,25 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule):
continue
if dd.daemon_type == 'osd':
continue # ignore OSDs for now
spec = None
if dd.service_name() in self.spec_store.specs:
spec = self.spec_store.specs[dd.service_name()]
else:
spec = ServiceSpec(
unmanaged=True,
service_type=dd.daemon_type,
service_id=dd.service_id(),
placement=PlacementSpec(
hosts=[dd.hostname]
)
)
if n not in sm:
sm[n] = orchestrator.ServiceDescription(
service_name=n,
last_refresh=dd.last_refresh,
container_image_id=dd.container_image_id,
container_image_name=dd.container_image_name,
spec=spec,
)
if spec:
if dd.service_name() in self.spec_store.specs:
sm[n].size = self._get_spec_size(spec)
sm[n].created = self.spec_store.spec_created[dd.service_name()]
else:
@@ -1848,12 +1855,11 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule):
if service_name is not None and service_name != n:
continue
sm[n] = orchestrator.ServiceDescription(
service_name=n,
spec=spec,
size=self._get_spec_size(spec),
running=0,
)
return [s for n, s in sm.items()]
return list(sm.values())
@trivial_completion
def list_daemons(self, service_name=None, daemon_type=None, daemon_id=None,
@@ -3292,13 +3298,6 @@ receivers:
"""
return self.rm_util.report
@trivial_completion
def list_specs(self, service_name=None):
"""
Loads all entries from the service_spec mon_store root.
"""
return self.spec_store.find(service_name=service_name)
class BaseScheduler(object):
"""


@@ -71,6 +71,58 @@ class TestCephadm(object):
c = cephadm_module.list_daemons(refresh=True)
assert wait(cephadm_module, c) == []
ps = PlacementSpec(hosts=['test'], count=1)
c = cephadm_module.add_mds(ServiceSpec('mds', 'name', placement=ps))
[out] = wait(cephadm_module, c)
match_glob(out, "Deployed mds.name.* on host 'test'")

c = cephadm_module.list_daemons()

def remove_id(dd):
    out = dd.to_json()
    del out['daemon_id']
    return out

assert [remove_id(dd) for dd in wait(cephadm_module, c)] == [
    {
        'daemon_type': 'mds',
        'hostname': 'test',
        'status': 1,
        'status_desc': 'starting'
    }
]

ps = PlacementSpec(hosts=['test'], count=1)
spec = ServiceSpec('rgw', 'r.z', placement=ps)
c = cephadm_module.apply_rgw(spec)
assert wait(cephadm_module, c) == 'Scheduled rgw update...'

c = cephadm_module.describe_service()
out = [o.to_json() for o in wait(cephadm_module, c)]
expected = [
    {
        'placement': {'hosts': [{'hostname': 'test', 'name': '', 'network': ''}]},
        'service_id': 'name',
        'service_name': 'mds.name',
        'service_type': 'mds',
        'status': {'running': 1, 'size': 0},
        'unmanaged': True
    },
    {
        'placement': {
            'count': 1,
            'hosts': [{'hostname': 'test', 'name': '', 'network': ''}]
        },
        'rgw_realm': 'r',
        'rgw_zone': 'z',
        'service_id': 'r.z',
        'service_name': 'rgw.r.z',
        'service_type': 'rgw',
        'status': {'running': 0, 'size': 1}
    }
]
assert out == expected
assert [ServiceDescription.from_json(o).to_json() for o in expected] == expected
def test_device_ls(self, cephadm_module):
with self._with_host(cephadm_module, 'test'):
c = cephadm_module.get_inventory()
@@ -193,7 +245,7 @@ class TestCephadm(object):
with self._with_host(cephadm_module, 'test'):
ps = PlacementSpec(hosts=['test'], count=1)
c = cephadm_module.add_rgw(RGWSpec('realm', 'zone', placement=ps))
c = cephadm_module.add_rgw(RGWSpec(rgw_realm='realm', rgw_zone='zone', placement=ps))
[out] = wait(cephadm_module, c)
match_glob(out, "Deployed rgw.realm.zone.* on host 'test'")
@@ -203,12 +255,12 @@ class TestCephadm(object):
with self._with_host(cephadm_module, 'host1'):
with self._with_host(cephadm_module, 'host2'):
ps = PlacementSpec(hosts=['host1'], count=1)
c = cephadm_module.add_rgw(RGWSpec('realm', 'zone1', placement=ps))
c = cephadm_module.add_rgw(RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
[out] = wait(cephadm_module, c)
match_glob(out, "Deployed rgw.realm.zone1.host1.* on host 'host1'")
ps = PlacementSpec(hosts=['host1', 'host2'], count=2)
r = cephadm_module._apply_service(RGWSpec('realm', 'zone1', placement=ps))
r = cephadm_module._apply_service(RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
assert r
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
@@ -318,7 +370,7 @@ class TestCephadm(object):
spec = ServiceSpec('mgr', placement=ps)
c = cephadm_module.apply_mgr(spec)
assert wait(cephadm_module, c) == 'Scheduled mgr update...'
assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
assert [d.spec for d in wait(cephadm_module, cephadm_module.describe_service())] == [spec]
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
def test_apply_mds_save(self, cephadm_module):
@@ -327,7 +379,7 @@
spec = ServiceSpec('mds', 'fsname', placement=ps)
c = cephadm_module.apply_mds(spec)
assert wait(cephadm_module, c) == 'Scheduled mds update...'
assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
assert [d.spec for d in wait(cephadm_module, cephadm_module.describe_service())] == [spec]
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
def test_apply_rgw_save(self, cephadm_module):
@@ -336,7 +388,7 @@
spec = ServiceSpec('rgw', 'r.z', placement=ps)
c = cephadm_module.apply_rgw(spec)
assert wait(cephadm_module, c) == 'Scheduled rgw update...'
assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
assert [d.spec for d in wait(cephadm_module, cephadm_module.describe_service())] == [spec]
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
def test_apply_rbd_mirror_save(self, cephadm_module):
@@ -345,7 +397,7 @@
spec = ServiceSpec('rbd-mirror', placement=ps)
c = cephadm_module.apply_rbd_mirror(spec)
assert wait(cephadm_module, c) == 'Scheduled rbd-mirror update...'
assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
assert [d.spec for d in wait(cephadm_module, cephadm_module.describe_service())] == [spec]
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
def test_apply_nfs_save(self, cephadm_module):
@@ -354,7 +406,7 @@
spec = NFSServiceSpec('name', pool='pool', namespace='namespace', placement=ps)
c = cephadm_module.apply_nfs(spec)
assert wait(cephadm_module, c) == 'Scheduled nfs update...'
assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
assert [d.spec for d in wait(cephadm_module, cephadm_module.describe_service())] == [spec]
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
def test_apply_prometheus_save(self, cephadm_module):
@@ -363,7 +415,7 @@
spec = ServiceSpec('prometheus', placement=ps)
c = cephadm_module.apply_prometheus(spec)
assert wait(cephadm_module, c) == 'Scheduled prometheus update...'
assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
assert [d.spec for d in wait(cephadm_module, cephadm_module.describe_service())] == [spec]
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
def test_apply_node_exporter_save(self, cephadm_module):
@@ -372,5 +424,5 @@
spec = ServiceSpec('node-exporter', placement=ps, service_id='my_exporter')
c = cephadm_module.apply_node_exporter(spec)
assert wait(cephadm_module, c) == 'Scheduled node-exporter update...'
assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
assert wait(cephadm_module, cephadm_module.list_specs('node-exporter.my_exporter')) == [spec]
assert [d.spec for d in wait(cephadm_module, cephadm_module.describe_service())] == [spec]
assert [d.spec for d in wait(cephadm_module, cephadm_module.describe_service(service_name='node-exporter.my_exporter'))] == [spec]


@@ -27,20 +27,26 @@ describe('ServicesComponent', () => {
const services = [
{
container_image_id: 'e70344c77bcbf3ee389b9bf5128f635cf95f3d59e005c5d8e67fc19bcc74ed23',
container_image_name: 'docker.io/ceph/daemon-base:latest-master-devel',
service_type: 'osd',
service_name: 'osd',
size: 3,
running: 3,
last_refresh: '2020-02-25T04:33:26.465699'
status: {
container_image_id: 'e70344c77bcbf3ee389b9bf5128f635cf95f3d59e005c5d8e67fc19bcc74ed23',
container_image_name: 'docker.io/ceph/daemon-base:latest-master-devel',
size: 3,
running: 3,
last_refresh: '2020-02-25T04:33:26.465699'
}
},
{
container_image_id: 'e70344c77bcbf3ee389b9bf5128f635cf95f3d59e005c5d8e67fc19bcc74ed23',
container_image_name: 'docker.io/ceph/daemon-base:latest-master-devel',
service_type: 'crash',
service_name: 'crash',
size: 1,
running: 1,
last_refresh: '2020-02-25T04:33:26.465766'
status: {
container_image_id: 'e70344c77bcbf3ee389b9bf5128f635cf95f3d59e005c5d8e67fc19bcc74ed23',
container_image_name: 'docker.io/ceph/daemon-base:latest-master-devel',
size: 1,
running: 1,
last_refresh: '2020-02-25T04:33:26.465766'
}
}
];


@@ -8,7 +8,7 @@ import { CdTableColumn } from '../../../shared/models/cd-table-column';
import { CdTableFetchDataContext } from '../../../shared/models/cd-table-fetch-data-context';
import { CdTableSelection } from '../../../shared/models/cd-table-selection';
import { Permissions } from '../../../shared/models/permissions';
import { CephService } from '../../../shared/models/service.interface';
import { CephServiceSpec } from '../../../shared/models/service.interface';
import { AuthStorageService } from '../../../shared/services/auth-storage.service';
@Component({
@@ -32,7 +32,7 @@ export class ServicesComponent implements OnChanges, OnInit {
docsUrl: string;
columns: Array<CdTableColumn> = [];
services: Array<CephService> = [];
services: Array<CephServiceSpec> = [];
isLoadingServices = false;
selection = new CdTableSelection();
@@ -54,27 +54,27 @@
},
{
name: this.i18n('Container image name'),
prop: 'container_image_name',
prop: 'status.container_image_name',
flexGrow: 3
},
{
name: this.i18n('Container image ID'),
prop: 'container_image_id',
prop: 'status.container_image_id',
flexGrow: 3
},
{
name: this.i18n('Running'),
prop: 'running',
prop: 'status.running',
flexGrow: 1
},
{
name: this.i18n('Size'),
prop: 'size',
prop: 'status.size',
flexGrow: 1
},
{
name: this.i18n('Last Refreshed'),
prop: 'last_refresh',
prop: 'status.last_refresh',
flexGrow: 1
}
];
@@ -105,7 +105,7 @@ export class ServicesComponent implements OnChanges, OnInit {
}
this.isLoadingServices = true;
this.cephServiceService.list().subscribe(
(services: CephService[]) => {
(services: CephServiceSpec[]) => {
this.services = services;
this.isLoadingServices = false;
},


@@ -4,7 +4,7 @@ import { Injectable } from '@angular/core';
import { Observable } from 'rxjs';
import { Daemon } from '../models/daemon.interface';
import { CephService } from '../models/service.interface';
import { CephServiceSpec } from '../models/service.interface';
import { ApiModule } from './api.module';
@Injectable({
@@ -15,11 +15,11 @@ export class CephServiceService {
constructor(private http: HttpClient) {}
list(serviceName?: string): Observable<CephService[]> {
list(serviceName?: string): Observable<CephServiceSpec[]> {
const options = serviceName
? { params: new HttpParams().set('service_name', serviceName) }
: {};
return this.http.get<CephService[]>(this.url, options);
return this.http.get<CephServiceSpec[]>(this.url, options);
}
getDaemons(serviceName?: string): Observable<Daemon[]> {


@@ -1,8 +1,17 @@
export interface CephService {
export interface CephServiceStatus {
container_image_id: string;
container_image_name: string;
service_name: string;
size: number;
running: number;
last_refresh: Date;
created: Date;
}
// This will come in handy when creating arbitrary services
export interface CephServiceSpec {
service_name: string;
service_type: string;
service_id: string;
unmanaged: boolean;
status: CephServiceStatus;
}


@@ -889,13 +889,6 @@ class Orchestrator(object):
"""
raise NotImplementedError()
def list_specs(self, service_name=None):
# type: (Optional[str]) -> Completion
"""
Lists saved service specs
"""
raise NotImplementedError()
def remove_service(self, service_name):
# type: (str) -> Completion
"""
@@ -1274,14 +1267,17 @@ class DaemonDescription(object):
return self.name().startswith(service_name + '.')
return False
def service_name(self):
def service_id(self):
if self.daemon_type == 'rgw':
v = self.daemon_id.split('.')
s_name = '.'.join(v[0:2])
return 'rgw.%s' % s_name
return '.'.join(v[0:2])
if self.daemon_type in ['mds', 'nfs']:
_s_name = self.daemon_id.split('.')[0]
return '%s.%s' % (self.daemon_type, _s_name)
return self.daemon_id.split('.')[0]
return self.daemon_type
def service_name(self):
if self.daemon_type in ['rgw', 'mds', 'nfs']:
return f'{self.daemon_type}.{self.service_id()}'
return self.daemon_type
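As a quick sketch of the resulting mapping (hypothetical daemon IDs, following
the split rules above):

    # an rgw daemon id carries '<realm>.<zone>' as its first two parts
    dd = DaemonDescription(daemon_type='rgw', daemon_id='realm.zone1.host1.abcdef')
    assert dd.service_id() == 'realm.zone1'
    assert dd.service_name() == 'rgw.realm.zone1'

    # daemon types without a service id map to the bare type
    dd = DaemonDescription(daemon_type='mon', daemon_id='host1')
    assert dd.service_name() == 'mon'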
def __repr__(self):
@@ -1330,26 +1326,21 @@ class ServiceDescription(object):
"""
def __init__(self,
spec: ServiceSpec,
container_image_id=None,
container_image_name=None,
service_name=None,
rados_config_location=None,
service_url=None,
last_refresh=None,
created=None,
size=0,
running=0,
spec=None):
running=0):
# Not everyone runs in containers, but enough people do to
# justify having the container_image_id (image hash) and
# container_image_name (image name)
self.container_image_id = container_image_id # image hash
self.container_image_name = container_image_name # image friendly name
# The service_name is either a bare type (e.g., 'mgr') or
# type.id combination (e.g., 'mds.fsname' or 'rgw.realm.zone').
self.service_name = service_name
# Location of the service configuration when stored in rados
# object. Format: "rados://<pool>/[<namespace/>]<object>"
self.rados_config_location = rados_config_location
@@ -1368,40 +1359,45 @@
self.last_refresh = last_refresh # type: Optional[datetime.datetime]
self.created = created # type: Optional[datetime.datetime]
self.spec = spec
self.spec: ServiceSpec = spec
def service_type(self):
if self.service_name:
return self.service_name.split('.')[0]
return None
return self.spec.service_type
def __repr__(self):
return "<ServiceDescription>({name})".format(name=self.service_name)
return f"<ServiceDescription of {self.spec.one_line_str()}>"
def to_json(self):
out = {
out = self.spec.to_json()
status = {
'container_image_id': self.container_image_id,
'container_image_name': self.container_image_name,
'service_name': self.service_name,
'rados_config_location': self.rados_config_location,
'service_url': self.service_url,
'size': self.size,
'running': self.running,
'spec': self.spec.to_json() if self.spec is not None else None
'last_refresh': self.last_refresh,
'created': self.created
}
for k in ['last_refresh', 'created']:
if getattr(self, k):
out[k] = getattr(self, k).strftime(DATEFMT)
return {k: v for (k, v) in out.items() if v is not None}
status[k] = getattr(self, k).strftime(DATEFMT)
status = {k: v for (k, v) in status.items() if v is not None}
out['status'] = status
return out
@classmethod
@handle_type_error
def from_json(cls, data):
def from_json(cls, data: dict):
c = data.copy()
status = c.pop('status', {})
spec = ServiceSpec.from_json(c)
c_status = status.copy()
for k in ['last_refresh', 'created']:
if k in c:
c[k] = datetime.datetime.strptime(c[k], DATEFMT)
return cls(**c)
c_status[k] = datetime.datetime.strptime(c_status[k], DATEFMT)
return cls(spec=spec, **c_status)
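A minimal round-trip sketch, using the JSON shape asserted by the cephadm
test above:

    sd = ServiceDescription.from_json({
        'service_type': 'mds',
        'service_id': 'name',
        'service_name': 'mds.name',
        'placement': {'hosts': [{'hostname': 'test', 'name': '', 'network': ''}]},
        'unmanaged': True,
        'status': {'running': 1, 'size': 0},  # popped off before the spec is parsed
    })
    assert sd.spec.service_name() == 'mds.name'
    assert sd.running == 1 and sd.size == 0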
class InventoryFilter(object):


@@ -24,7 +24,8 @@ from mgr_module import MgrModule, HandleCommandResult
from ._interface import OrchestratorClientMixin, DeviceLightLoc, _cli_read_command, \
raise_if_exception, _cli_write_command, TrivialReadCompletion, OrchestratorError, \
NoOrchestrator, OrchestratorValidationError, NFSServiceSpec, \
RGWSpec, InventoryFilter, InventoryHost, HostSpec, CLICommandMeta
RGWSpec, InventoryFilter, InventoryHost, HostSpec, CLICommandMeta, \
ServiceDescription
def nice_delta(now, t, suffix=''):
if t:
@@ -32,6 +33,16 @@ def nice_delta(now, t, suffix=''):
else:
return '-'
def to_format(what, format):
    if format == 'json':
        return json.dumps(what, sort_keys=True)
    elif format == 'json-pretty':
        return json.dumps(what, indent=2, sort_keys=True)
    elif format == 'yaml':
        return yaml.safe_dump_all(what, default_flow_style=False)
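For example (a quick sketch of the helper above):

    to_format([{'service_name': 'mon'}], 'json')
    # -> '[{"service_name": "mon"}]'

    to_format([{'a': 1}, {'b': 2}], 'yaml')
    # -> one YAML document per list element, separated by '---'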
@six.add_metaclass(CLICommandMeta)
class OrchestratorCli(OrchestratorClientMixin, MgrModule):
MODULE_OPTIONS = [
@@ -202,16 +213,16 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule):
@_cli_read_command(
'orch host ls',
'name=format,type=CephChoices,strings=json|plain,req=false',
'name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false',
'List hosts')
def _get_hosts(self, format='plain'):
completion = self.get_hosts()
self._orchestrator_wait([completion])
raise_if_exception(completion)
if format == 'json':
if format != 'plain':
hosts = [host.to_json()
for host in completion.result]
output = json.dumps(hosts, sort_keys=True)
output = to_format(hosts, format)
else:
table = PrettyTable(
['HOST', 'ADDR', 'LABELS', 'STATUS'],
@@ -249,7 +260,7 @@
@_cli_read_command(
'orch device ls',
"name=hostname,type=CephString,n=N,req=false "
"name=format,type=CephChoices,strings=json|plain,req=false "
"name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false "
"name=refresh,type=CephBool,req=false",
'List devices on a host')
def _list_devices(self, hostname=None, format='plain', refresh=False):
@@ -268,9 +279,9 @@
self._orchestrator_wait([completion])
raise_if_exception(completion)
if format == 'json':
if format != 'plain':
data = [n.to_json() for n in completion.result]
return HandleCommandResult(stdout=json.dumps(data))
return HandleCommandResult(stdout=to_format(data, format))
else:
out = []
@@ -316,28 +327,36 @@
'orch ls',
"name=service_type,type=CephString,req=false "
"name=service_name,type=CephString,req=false "
"name=format,type=CephChoices,strings=json|plain,req=false "
"name=export,type=CephBool,req=false "
"name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false "
"name=refresh,type=CephBool,req=false",
'List services known to orchestrator')
def _list_services(self, host=None, service_type=None, service_name=None, format='plain', refresh=False):
def _list_services(self, host=None, service_type=None, service_name=None, export=False, format='plain', refresh=False):
if export and format == 'plain':
format = 'yaml'
completion = self.describe_service(service_type,
service_name,
refresh=refresh)
self._orchestrator_wait([completion])
raise_if_exception(completion)
services = completion.result
services: List[ServiceDescription] = completion.result
def ukn(s):
return '<unknown>' if s is None else s
# Sort the list for display
services.sort(key=lambda s: (ukn(s.service_name)))
services.sort(key=lambda s: (ukn(s.spec.service_name())))
if len(services) == 0:
return HandleCommandResult(stdout="No services reported")
elif format == 'json':
data = [s.to_json() for s in services]
return HandleCommandResult(stdout=json.dumps(data))
elif format != 'plain':
if export:
data = [s.spec.to_json() for s in services]
else:
data = [s.to_json() for s in services]
return HandleCommandResult(stdout=to_format(data, format))
else:
now = datetime.datetime.utcnow()
table = PrettyTable(
@@ -355,7 +374,7 @@
table.align['PLACEMENT'] = 'l'
table.left_padding_width = 0
table.right_padding_width = 2
for s in sorted(services, key=lambda s: s.service_name):
for s in services:
if not s.spec:
pl = '<no spec>'
elif s.spec.unmanaged:
@@ -363,7 +382,7 @@
else:
pl = s.spec.placement.pretty_str()
table.add_row((
s.service_name,
s.spec.service_name(),
'%d/%d' % (s.running, s.size),
nice_delta(now, s.last_refresh, ' ago'),
nice_delta(now, s.created),
@@ -380,7 +399,7 @@
"name=service_name,type=CephString,req=false "
"name=daemon_type,type=CephString,req=false "
"name=daemon_id,type=CephString,req=false "
"name=format,type=CephChoices,strings=json|plain,req=false "
"name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false "
"name=refresh,type=CephBool,req=false",
'List daemons known to orchestrator')
def _list_daemons(self, hostname=None, service_name=None, daemon_type=None, daemon_id=None, format='plain', refresh=False):
@@ -400,9 +419,9 @@
if len(daemons) == 0:
return HandleCommandResult(stdout="No daemons reported")
elif format == 'json':
elif format != 'plain':
data = [s.to_json() for s in daemons]
return HandleCommandResult(stdout=json.dumps(data))
return HandleCommandResult(stdout=to_format(data, format))
else:
now = datetime.datetime.utcnow()
table = PrettyTable(
@@ -697,17 +716,6 @@ Usage:
raise_if_exception(completion)
return HandleCommandResult(stdout=completion.result_str())
@_cli_write_command(
'orch spec dump',
'name=service_name,type=CephString,req=false',
desc='List all Service specs')
def _get_service_specs(self, service_name=None):
completion = self.list_specs(service_name=service_name)
self._orchestrator_wait([completion])
raise_if_exception(completion)
specs = completion.result
return HandleCommandResult(stdout=yaml.safe_dump_all(specs))
@_cli_write_command(
'orch apply',
'name=service_type,type=CephChoices,strings=mon|mgr|rbd-mirror|crash|alertmanager|grafana|node-exporter|prometheus,req=false '


@@ -266,7 +266,6 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
spec = {}
spec['mon'] = orchestrator.ServiceDescription(
service_name='mon',
spec=ServiceSpec(
'mon',
placement=PlacementSpec(
@@ -278,7 +277,6 @@
last_refresh=now,
)
spec['mgr'] = orchestrator.ServiceDescription(
service_name='mgr',
spec=ServiceSpec(
'mgr',
placement=PlacementSpec.from_string('count:1'),
@@ -289,7 +287,6 @@
)
if not cl['spec'].get('crashCollector', {}).get('disable', False):
spec['crash'] = orchestrator.ServiceDescription(
service_name='crash',
spec=ServiceSpec(
'crash',
placement=PlacementSpec.from_string('*'),
@@ -313,9 +310,9 @@
if fs['spec'].get('metadataServer', {}).get('activeStandby', False):
total_mds = active * 2
spec[svc] = orchestrator.ServiceDescription(
service_name=svc,
spec=ServiceSpec(
svc,
service_type='mds',
service_id=fs['metadata']['name'],
placement=PlacementSpec(count=active),
),
size=total_mds,
@@ -341,8 +338,8 @@
ssl = False
port = zone['spec']['gateway']['port'] or 80
spec[svc] = orchestrator.ServiceDescription(
service_name=svc,
spec=RGWSpec(
service_id=rgw_realm + '.' + rgw_zone,
rgw_realm=rgw_realm,
rgw_zone=rgw_zone,
ssl=ssl,


@@ -210,7 +210,10 @@ class TestOrchestrator(MgrModule, orchestrator.Orchestrator):
continue
daemon_size = len(list(daemons))
services.append(orchestrator.ServiceDescription(
service_name=daemon_type, size=daemon_size, running=daemon_size))
spec=ServiceSpec(
service_type=service_type,
),
size=daemon_size, running=daemon_size))
def _filter_func(svc):
if service_name is not None and service_name != svc.service_name:


@@ -11,7 +11,7 @@ setenv =
deps =
cython
-r requirements.txt
commands = pytest -v --cov --cov-append --cov-report=term --doctest-modules {posargs:mgr_util.py tests/ cephadm/ pg_autoscaler/ progress/}
commands = pytest -v --cov --cov-append --cov-report= --doctest-modules {posargs:mgr_util.py tests/ cephadm/ pg_autoscaler/ progress/}
[testenv:mypy]
basepython = python3


@@ -282,6 +282,7 @@ class DriveGroupSpec(ServiceSpec):
c['db_devices'] = self.db_devices.to_json()
if self.wal_devices:
c['wal_devices'] = self.wal_devices.to_json()
c['service_name'] = self.service_name()
return c
def __eq__(self, other):


@@ -1,6 +1,7 @@
import fnmatch
import re
from collections import namedtuple
from functools import wraps
from typing import Optional, Dict, Any, List, Union
import six
@@ -28,6 +29,17 @@ def assert_valid_host(name):
raise ServiceSpecValidationError(e)
def handle_type_error(method):
    @wraps(method)
    def inner(cls, *args, **kwargs):
        try:
            return method(cls, *args, **kwargs)
        except (TypeError, AttributeError) as e:
            error_msg = '{}: {}'.format(cls.__name__, e)
            raise ServiceSpecValidationError(error_msg)
    return inner
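The effect, sketched with a hypothetical bad input: a stray field in a spec
dict now surfaces as a ServiceSpecValidationError instead of a bare TypeError:

    try:
        HostPlacementSpec.from_json({'hostname': 'h', 'bogus': 1})
    except ServiceSpecValidationError as e:
        print(e)  # e.g. "HostPlacementSpec: ... unexpected keyword argument 'bogus'"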
class HostPlacementSpec(namedtuple('HostPlacementSpec', ['hostname', 'network', 'name'])):
def __str__(self):
res = ''
@@ -39,6 +51,7 @@ class HostPlacementSpec(namedtuple('HostPlacementSpec', ['hostname', 'network',
return res
@classmethod
@handle_type_error
def from_json(cls, data):
return cls(**data)
@@ -189,11 +202,13 @@ class PlacementSpec(object):
return "PlacementSpec(%s)" % ', '.join(kv)
@classmethod
@handle_type_error
def from_json(cls, data):
hosts = data.get('hosts', [])
c = data.copy()
hosts = c.get('hosts', [])
if hosts:
data['hosts'] = [HostPlacementSpec.from_json(host) for host in hosts]
_cls = cls(**data)
c['hosts'] = [HostPlacementSpec.from_json(host) for host in hosts]
_cls = cls(**c)
_cls.validate()
return _cls
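A small sketch of why the copy matters: the caller's dict is left untouched
(input shape taken from the tests in this PR):

    data = {'count': 1, 'hosts': [{'hostname': 'test', 'name': '', 'network': ''}]}
    ps = PlacementSpec.from_json(data)
    # only the internal copy had its 'hosts' replaced by HostPlacementSpec objects
    assert data['hosts'] == [{'hostname': 'test', 'name': '', 'network': ''}]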
@@ -326,6 +341,34 @@ class ServiceSpec(object):
KNOWN_SERVICE_TYPES = 'alertmanager crash grafana mds mgr mon nfs ' \
'node-exporter osd prometheus rbd-mirror rgw'.split()
@classmethod
def _cls(cls, service_type):
    from ceph.deployment.drive_group import DriveGroupSpec

    ret = {
        'rgw': RGWSpec,
        'nfs': NFSServiceSpec,
        'osd': DriveGroupSpec
    }.get(service_type, cls)
    if ret == ServiceSpec and not service_type:
        raise ServiceSpecValidationError('Spec needs a "service_type" key.')
    return ret

def __new__(cls, *args, **kwargs):
    """
    Some Python magic to make sure we don't end up with an object
    like `ServiceSpec('rgw')` of type `ServiceSpec`. Instead:

    >>> type(ServiceSpec('rgw')) == type(RGWSpec('rgw'))
    True
    """
    if cls != ServiceSpec:
        return object.__new__(cls)
    service_type = kwargs.get('service_type', args[0] if args else None)
    sub_cls = cls._cls(service_type)
    return object.__new__(sub_cls)
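A sketch of the resulting dispatch, mirroring the doctest and the cephadm
tests above:

    spec = ServiceSpec('rgw', 'r.z')
    assert isinstance(spec, RGWSpec)                # dispatched on service_type
    assert spec.service_name() == 'rgw.r.z'
    assert type(ServiceSpec('mon')) is ServiceSpec  # no specialized subclass for mon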
def __init__(self,
service_type, # type: str
service_id=None, # type: Optional[str]
@@ -341,6 +384,7 @@
self.unmanaged = unmanaged
@classmethod
@handle_type_error
def from_json(cls, json_spec):
# type: (dict) -> Any
# Python 3:
@@ -350,19 +394,27 @@
Initialize 'ServiceSpec' object data from a json structure
:param json_spec: A valid dict with ServiceSpec
"""
from ceph.deployment.drive_group import DriveGroupSpec
service_type = json_spec.get('service_type', '')
_cls = {
'rgw': RGWSpec,
'nfs': NFSServiceSpec,
'osd': DriveGroupSpec
}.get(service_type, cls)
c = json_spec.copy()
if _cls == ServiceSpec and not service_type:
raise ServiceSpecValidationError('Spec needs a "service_type" key.')
# kludge to make `from_json` compatible with `Orchestrator.describe_service`
# Open question: remove `service_id` from to_json?
if c.get('service_name', ''):
service_type_id = c['service_name'].split('.', 1)
return _cls._from_json_impl(json_spec) # type: ignore
if not c.get('service_type', ''):
c['service_type'] = service_type_id[0]
if not c.get('service_id', '') and len(service_type_id) > 1:
c['service_id'] = service_type_id[1]
del c['service_name']
service_type = c.get('service_type', '')
_cls = cls._cls(service_type)
if 'status' in c:
del c['status']  # kludge to stay compatible with `ServiceDescription.to_json()`
return _cls._from_json_impl(c) # type: ignore
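A sketch of the compatibility path (hypothetical input in
`ServiceDescription.to_json()` shape):

    spec = ServiceSpec.from_json({
        'service_name': 'mds.fsname',          # split into type and id
        'status': {'running': 1, 'size': 1},   # dropped by the kludge above
    })
    assert spec.service_type == 'mds'
    assert spec.service_id == 'fsname'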
@classmethod
def _from_json_impl(cls, json_spec):
@@ -390,6 +442,8 @@
val = val.to_json()
if val:
c[key] = val
c['service_name'] = self.service_name()
return c
def validate(self):
@@ -415,7 +469,7 @@ def servicespec_validate_add(self: ServiceSpec):
class NFSServiceSpec(ServiceSpec):
def __init__(self, service_id, pool=None, namespace=None, placement=None,
def __init__(self, service_id=None, pool=None, namespace=None, placement=None,
service_type='nfs', unmanaged=False):
assert service_type == 'nfs'
super(NFSServiceSpec, self).__init__(
@@ -441,17 +495,17 @@ class RGWSpec(ServiceSpec):
"""
def __init__(self,
service_type='rgw',
service_id=None, # type: Optional[str]
placement=None,
rgw_realm=None, # type: Optional[str]
rgw_zone=None, # type: Optional[str]
subcluster=None, # type: Optional[str]
service_id=None, # type: Optional[str]
placement=None,
service_type='rgw',
rgw_frontend_port=None, # type: Optional[int]
unmanaged=False, # type: bool
ssl=False, # type: bool
):
assert service_type == 'rgw'
assert service_type == 'rgw', service_type
if service_id:
a = service_id.split('.', 2)
rgw_realm = a[0]
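A sketch of the derivation, matching the rgw expectations in the cephadm test
above (the zone assignment falls outside this excerpt):

    spec = RGWSpec(service_id='myrealm.myzone')
    assert spec.rgw_realm == 'myrealm'
    assert spec.rgw_zone == 'myzone'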