Merge pull request #46883 from adk3798/custom-config

mgr/cephadm: support for miscellaneous config files for daemons

Reviewed-by: Anthony D'Atri <anthonyeleven@users.noreply.github.com>
Reviewed-by: John Mulligan <jmulligan@redhat.com>
Adam King 2022-07-27 19:49:44 -04:00 committed by GitHub
commit 3f1f862b9c
8 changed files with 328 additions and 38 deletions

View File

@ -523,6 +523,57 @@ a spec like
which would cause each mon daemon to be deployed with `--cpus=2`.
Custom Config Files
===================
Cephadm supports specifying miscellaneous config files for daemons.
To do so, users must provide both the content of the config file and the
path within the daemon's container at which it should be mounted. After a
YAML spec with custom config files is applied and the affected daemons are
redeployed, these files are mounted within the daemons' containers at the
specified locations.
Example service spec:
.. code-block:: yaml

    service_type: grafana
    service_name: grafana
    custom_configs:
      - mount_path: /etc/example.conf
        content: |
          setting1 = value1
          setting2 = value2
      - mount_path: /usr/share/grafana/example.cert
        content: |
          -----BEGIN PRIVATE KEY-----
          V2VyIGRhcyBsaWVzdCBpc3QgZG9vZi4gTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFt
          ZXQsIGNvbnNldGV0dXIgc2FkaXBzY2luZyBlbGl0ciwgc2VkIGRpYW0gbm9udW15
          IGVpcm1vZCB0ZW1wb3IgaW52aWR1bnQgdXQgbGFib3JlIGV0IGRvbG9yZSBtYWdu
          YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg
          ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8=
          -----END PRIVATE KEY-----
          -----BEGIN CERTIFICATE-----
          V2VyIGRhcyBsaWVzdCBpc3QgZG9vZi4gTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFt
          ZXQsIGNvbnNldGV0dXIgc2FkaXBzY2luZyBlbGl0ciwgc2VkIGRpYW0gbm9udW15
          IGVpcm1vZCB0ZW1wb3IgaW52aWR1bnQgdXQgbGFib3JlIGV0IGRvbG9yZSBtYWdu
          YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg
          ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8=
          -----END CERTIFICATE-----
To get these new config files mounted within the containers for the daemons,
redeploy the relevant service:

.. prompt:: bash

    ceph orch redeploy <service-name>

For example:

.. prompt:: bash

    ceph orch redeploy grafana
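The same kind of spec can also be built programmatically from orchestrator
module code with the ``CustomConfig`` helper; a minimal sketch (the service
type and values are illustrative):

.. code-block:: python

    from ceph.deployment.service_spec import ServiceSpec, CustomConfig

    configs = [
        CustomConfig(content='setting1 = value1\nsetting2 = value2\n',
                     mount_path='/etc/example.conf'),
    ]
    spec = ServiceSpec(service_type='crash', custom_configs=configs)
    # the serialized spec now carries the custom config content and mount paths
    print(spec.to_json())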
.. _orch-rm:
Removing a Service

View File

@ -2760,10 +2760,46 @@ def create_daemon_dirs(ctx, fsid, daemon_type, daemon_id, uid, gid,
sg = SNMPGateway.init(ctx, fsid, daemon_id)
sg.create_daemon_conf()
_write_custom_conf_files(ctx, daemon_type, str(daemon_id), fsid, uid, gid)
def get_parm(option):
# type: (str) -> Dict[str, str]
def _write_custom_conf_files(ctx: CephadmContext, daemon_type: str, daemon_id: str, fsid: str, uid: int, gid: int) -> None:
# mostly making this its own function to make unit testing easier
if 'config_json' not in ctx or not ctx.config_json:
return
config_json = get_custom_config_files(ctx.config_json)
custom_config_dir = os.path.join(ctx.data_dir, fsid, 'custom_config_files', f'{daemon_type}.{daemon_id}')
if not os.path.exists(custom_config_dir):
makedirs(custom_config_dir, uid, gid, 0o755)
mandatory_keys = ['mount_path', 'content']
for ccf in config_json['custom_config_files']:
if all(k in ccf for k in mandatory_keys):
file_path = os.path.join(custom_config_dir, os.path.basename(ccf['mount_path']))
with open(file_path, 'w+', encoding='utf-8') as f:
os.fchown(f.fileno(), uid, gid)
os.fchmod(f.fileno(), 0o600)
f.write(ccf['content'])
def get_parm(option: str) -> Dict[str, str]:
js = _get_config_json(option)
# custom_config_files is a special field that may be in the config
# dict. It is used for mounting custom config files into daemons' containers
# and should be accessed through the "get_custom_config_files" function.
# For get_parm we need to discard it.
js.pop('custom_config_files', None)
return js
def get_custom_config_files(option: str) -> Dict[str, List[Dict[str, str]]]:
js = _get_config_json(option)
res: Dict[str, List[Dict[str, str]]] = {'custom_config_files': []}
if 'custom_config_files' in js:
res['custom_config_files'] = js['custom_config_files']
return res
def _get_config_json(option: str) -> Dict[str, Any]:
if not option:
return dict()
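For reference, a minimal sketch (not cephadm code) of how a single --config-json payload is split between get_parm and get_custom_config_files; the key names follow the stdin format exercised by the unit tests in this change:

# Illustrative payload only.
payload = {
    'config': '',
    'keyring': '',
    'custom_config_files': [
        {'mount_path': '/etc/example.conf', 'content': 'setting1 = value1\n'},
    ],
}

# get_parm() returns everything except custom_config_files:
parm = {k: v for k, v in payload.items() if k != 'custom_config_files'}
# get_custom_config_files() returns only the custom_config_files list:
custom = {'custom_config_files': payload.get('custom_config_files', [])}

assert parm == {'config': '', 'keyring': ''}
assert custom['custom_config_files'][0]['mount_path'] == '/etc/example.conf'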
@ -5789,16 +5825,30 @@ def extract_uid_gid_monitoring(ctx, daemon_type):
return uid, gid
def get_container_with_extra_args(ctx: CephadmContext,
fsid: str, daemon_type: str, daemon_id: Union[int, str],
privileged: bool = False,
ptrace: bool = False,
container_args: Optional[List[str]] = None) -> 'CephContainer':
# wrapper for get_container that additionally adds extra_container_args if present
# used for deploying daemons with additional podman/docker container arguments
def get_deployment_container(ctx: CephadmContext,
fsid: str, daemon_type: str, daemon_id: Union[int, str],
privileged: bool = False,
ptrace: bool = False,
container_args: Optional[List[str]] = None) -> 'CephContainer':
# wrapper for get_container specifically for containers made during the `cephadm deploy`
# command. Adds some extra things such as extra container args and custom config files
c = get_container(ctx, fsid, daemon_type, daemon_id, privileged, ptrace, container_args)
if 'extra_container_args' in ctx and ctx.extra_container_args:
c.container_args.extend(ctx.extra_container_args)
if 'config_json' in ctx and ctx.config_json:
conf_files = get_custom_config_files(ctx.config_json)
mandatory_keys = ['mount_path', 'content']
for conf in conf_files['custom_config_files']:
if all(k in conf for k in mandatory_keys):
mount_path = conf['mount_path']
file_path = os.path.join(
ctx.data_dir,
fsid,
'custom_config_files',
f'{daemon_type}.{daemon_id}',
os.path.basename(mount_path)
)
c.volume_mounts[file_path] = mount_path
return c
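A small sketch (not cephadm code) of the bind mount this produces for a custom config file; the fsid and daemon name are illustrative, and /var/lib/ceph is used here as cephadm's usual data dir:

import os

data_dir = '/var/lib/ceph'   # illustrative; cephadm's usual data dir
fsid = '9b9d7609-f4d5-4aba-94c8-effa764d96c9'
daemon_type, daemon_id = 'grafana', 'host1'
mount_path = '/etc/example.conf'

host_path = os.path.join(data_dir, fsid, 'custom_config_files',
                         f'{daemon_type}.{daemon_id}',
                         os.path.basename(mount_path))
# The container gets a host_path -> mount_path bind mount, i.e. roughly
# `-v <host_path>:/etc/example.conf` on the podman/docker command line.
volume_mounts = {host_path: mount_path}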
@ -5843,8 +5893,8 @@ def command_deploy(ctx):
uid, gid = extract_uid_gid(ctx)
make_var_run(ctx, ctx.fsid, uid, gid)
c = get_container_with_extra_args(ctx, ctx.fsid, daemon_type, daemon_id,
ptrace=ctx.allow_ptrace)
c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id,
ptrace=ctx.allow_ptrace)
deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid,
config=config, keyring=keyring,
osd_fsid=ctx.osd_fsid,
@ -5868,7 +5918,7 @@ def command_deploy(ctx):
'contain arg for {}'.format(daemon_type.capitalize(), ', '.join(required_args)))
uid, gid = extract_uid_gid_monitoring(ctx, daemon_type)
c = get_container_with_extra_args(ctx, ctx.fsid, daemon_type, daemon_id)
c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id)
deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid,
reconfig=ctx.reconfig,
ports=daemon_ports)
@ -5880,7 +5930,7 @@ def command_deploy(ctx):
config, keyring = get_config_and_keyring(ctx)
# TODO: extract ganesha uid/gid (997, 994) ?
uid, gid = extract_uid_gid(ctx)
c = get_container_with_extra_args(ctx, ctx.fsid, daemon_type, daemon_id)
c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id)
deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid,
config=config, keyring=keyring,
reconfig=ctx.reconfig,
@ -5889,7 +5939,7 @@ def command_deploy(ctx):
elif daemon_type == CephIscsi.daemon_type:
config, keyring = get_config_and_keyring(ctx)
uid, gid = extract_uid_gid(ctx)
c = get_container_with_extra_args(ctx, ctx.fsid, daemon_type, daemon_id)
c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id)
deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid,
config=config, keyring=keyring,
reconfig=ctx.reconfig,
@ -5903,7 +5953,7 @@ def command_deploy(ctx):
elif daemon_type == HAproxy.daemon_type:
haproxy = HAproxy.init(ctx, ctx.fsid, daemon_id)
uid, gid = haproxy.extract_uid_gid_haproxy()
c = get_container_with_extra_args(ctx, ctx.fsid, daemon_type, daemon_id)
c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id)
deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid,
reconfig=ctx.reconfig,
ports=daemon_ports)
@ -5911,7 +5961,7 @@ def command_deploy(ctx):
elif daemon_type == Keepalived.daemon_type:
keepalived = Keepalived.init(ctx, ctx.fsid, daemon_id)
uid, gid = keepalived.extract_uid_gid_keepalived()
c = get_container_with_extra_args(ctx, ctx.fsid, daemon_type, daemon_id)
c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id)
deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid,
reconfig=ctx.reconfig,
ports=daemon_ports)
@ -5920,9 +5970,9 @@ def command_deploy(ctx):
cc = CustomContainer.init(ctx, ctx.fsid, daemon_id)
if not ctx.reconfig and not redeploy:
daemon_ports.extend(cc.ports)
c = get_container_with_extra_args(ctx, ctx.fsid, daemon_type, daemon_id,
privileged=cc.privileged,
ptrace=ctx.allow_ptrace)
c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id,
privileged=cc.privileged,
ptrace=ctx.allow_ptrace)
deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c,
uid=cc.uid, gid=cc.gid, config=None,
keyring=None, reconfig=ctx.reconfig,
@ -5937,7 +5987,7 @@ def command_deploy(ctx):
elif daemon_type == SNMPGateway.daemon_type:
sc = SNMPGateway.init(ctx, ctx.fsid, daemon_id)
c = get_container_with_extra_args(ctx, ctx.fsid, daemon_type, daemon_id)
c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id)
deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c,
sc.uid, sc.gid,
ports=daemon_ports)

View File

@ -243,6 +243,83 @@ class TestCephAdm(object):
with pytest.raises(Exception):
cd.prepare_dashboard(ctx, 0, 0, lambda _, extra_mounts=None, ___=None : '5', lambda : None)
@mock.patch('cephadm.logger')
@mock.patch('cephadm.get_custom_config_files')
@mock.patch('cephadm.get_container')
def test_get_deployment_container(self, _get_container, _get_config, logger):
"""
test get_deployment_container properly makes use of extra container args and custom conf files
"""
ctx = cd.CephadmContext()
ctx.config_json = '-'
ctx.extra_container_args = [
'--pids-limit=12345',
'--something',
]
ctx.data_dir = 'data'
_get_config.return_value = {'custom_config_files': [
{
'mount_path': '/etc/testing.str',
'content': 'this\nis\na\nstring',
}
]}
_get_container.return_value = cd.CephContainer.for_daemon(
ctx,
fsid='9b9d7609-f4d5-4aba-94c8-effa764d96c9',
daemon_type='grafana',
daemon_id='host1',
entrypoint='',
args=[],
container_args=[],
volume_mounts={},
bind_mounts=[],
envs=[],
privileged=False,
ptrace=False,
host_network=True,
)
c = cd.get_deployment_container(ctx,
'9b9d7609-f4d5-4aba-94c8-effa764d96c9',
'grafana',
'host1',)
assert '--pids-limit=12345' in c.container_args
assert '--something' in c.container_args
assert os.path.join('data', '9b9d7609-f4d5-4aba-94c8-effa764d96c9', 'custom_config_files', 'grafana.host1', 'testing.str') in c.volume_mounts
assert c.volume_mounts[os.path.join('data', '9b9d7609-f4d5-4aba-94c8-effa764d96c9', 'custom_config_files', 'grafana.host1', 'testing.str')] == '/etc/testing.str'
@mock.patch('cephadm.logger')
@mock.patch('cephadm.get_custom_config_files')
def test_write_custom_conf_files(self, _get_config, logger, cephadm_fs):
"""
test _write_custom_conf_files writes the conf files correctly
"""
ctx = cd.CephadmContext()
ctx.config_json = '-'
ctx.data_dir = cd.DATA_DIR
_get_config.return_value = {'custom_config_files': [
{
'mount_path': '/etc/testing.str',
'content': 'this\nis\na\nstring',
},
{
'mount_path': '/etc/testing.conf',
'content': 'very_cool_conf_setting: very_cool_conf_value\nx: y',
},
{
'mount_path': '/etc/no-content.conf',
},
]}
cd._write_custom_conf_files(ctx, 'mon', 'host1', 'fsid', 0, 0)
with open(os.path.join(cd.DATA_DIR, 'fsid', 'custom_config_files', 'mon.host1', 'testing.str'), 'r') as f:
assert 'this\nis\na\nstring' == f.read()
with open(os.path.join(cd.DATA_DIR, 'fsid', 'custom_config_files', 'mon.host1', 'testing.conf'), 'r') as f:
assert 'very_cool_conf_setting: very_cool_conf_value\nx: y' == f.read()
with pytest.raises(FileNotFoundError):
open(os.path.join(cd.DATA_DIR, 'fsid', 'custom_config_files', 'mon.host1', 'no-content.conf'), 'r')
@mock.patch('cephadm.call_throws')
@mock.patch('cephadm.get_parm')
def test_registry_login(self, get_parm, call_throws):

View File

@ -1132,6 +1132,12 @@ class CephadmServe:
except AttributeError:
eca = None
if daemon_spec.service_name in self.mgr.spec_store:
configs = self.mgr.spec_store[daemon_spec.service_name].spec.custom_configs
if configs is not None:
daemon_spec.final_config.update(
{'custom_config_files': [c.to_json() for c in configs]})
if self.mgr.cache.host_needs_registry_login(daemon_spec.host) and self.mgr.registry_url:
await self._registry_login(daemon_spec.host, json.loads(str(self.mgr.get_store('registry_credentials'))))
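A minimal sketch (illustrative values, not mgr code) of what this update adds to the daemon's final config; the result is later JSON-encoded and handed to the cephadm binary as `--config-json -` on stdin:

# Sketch only: merging a spec's custom configs into the daemon's final config.
final_config = {'config': '', 'keyring': ''}
custom = [{'content': 'something something something',
           'mount_path': '/etc/test.conf'}]   # i.e. [c.to_json() for c in spec.custom_configs]
final_config.update({'custom_config_files': custom})
# final_config is then serialized and passed to `cephadm ... deploy --config-json -`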

View File

@ -40,7 +40,8 @@ class CephadmDaemonDeploySpec:
ports: Optional[List[int]] = None,
rank: Optional[int] = None,
rank_generation: Optional[int] = None,
extra_container_args: Optional[List[str]] = None):
extra_container_args: Optional[List[str]] = None,
):
"""
A data structure to encapsulate `cephadm deploy ...
"""
@ -178,10 +179,6 @@ class CephadmService(metaclass=ABCMeta):
rank: Optional[int] = None,
rank_generation: Optional[int] = None,
) -> CephadmDaemonDeploySpec:
try:
eca = spec.extra_container_args
except AttributeError:
eca = None
return CephadmDaemonDeploySpec(
host=host,
daemon_id=daemon_id,
@ -192,7 +189,8 @@ class CephadmService(metaclass=ABCMeta):
ip=ip,
rank=rank,
rank_generation=rank_generation,
extra_container_args=eca,
extra_container_args=spec.extra_container_args if hasattr(
spec, 'extra_container_args') else None,
)
def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec:

View File

@ -16,7 +16,8 @@ except ImportError:
pass
from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, RGWSpec, \
NFSServiceSpec, IscsiServiceSpec, HostPlacementSpec, CustomContainerSpec, MDSSpec
NFSServiceSpec, IscsiServiceSpec, HostPlacementSpec, CustomContainerSpec, MDSSpec, \
CustomConfig
from ceph.deployment.drive_selection.selector import DriveSelection
from ceph.deployment.inventory import Devices, Device
from ceph.utils import datetime_to_str, datetime_now
@ -475,6 +476,37 @@ class TestCephadm(object):
image='',
)
@mock.patch("cephadm.serve.CephadmServe._run_cephadm")
def test_custom_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
_run_cephadm.side_effect = async_side_effect(('{}', '', 0))
test_cert = ['-----BEGIN PRIVATE KEY-----',
'YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg',
'ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8=',
'-----END PRIVATE KEY-----',
'-----BEGIN CERTIFICATE-----',
'YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg',
'ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8=',
'-----END CERTIFICATE-----']
configs = [
CustomConfig(content='something something something',
mount_path='/etc/test.conf'),
CustomConfig(content='\n'.join(test_cert), mount_path='/usr/share/grafana/thing.crt')
]
conf_outs = [json.dumps(c.to_json()) for c in configs]
stdin_str = '{' + \
f'"config": "", "keyring": "", "custom_config_files": [{conf_outs[0]}, {conf_outs[1]}]' + '}'
with with_host(cephadm_module, 'test'):
with with_service(cephadm_module, ServiceSpec(service_type='crash', custom_configs=configs), CephadmOrchestrator.apply_crash):
_run_cephadm.assert_called_with(
'test', 'crash.test', 'deploy', [
'--name', 'crash.test',
'--meta-json', '{"service_name": "crash", "ports": [], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null}',
'--config-json', '-',
],
stdin=stdin_str,
image='',
)
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
def test_daemon_check_post(self, cephadm_module: CephadmOrchestrator):
with with_host(cephadm_module, 'test'):

View File

@ -2,7 +2,7 @@ import enum
import yaml
from ceph.deployment.inventory import Device
from ceph.deployment.service_spec import ServiceSpec, PlacementSpec
from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, CustomConfig
from ceph.deployment.hostspec import SpecValidationError
try:
@ -179,6 +179,7 @@ class DriveGroupSpec(ServiceSpec):
method=None, # type: Optional[OSDMethod]
crush_device_class=None, # type: Optional[str]
config=None, # type: Optional[Dict[str, str]]
custom_configs=None, # type: Optional[List[CustomConfig]]
):
assert service_type is None or service_type == 'osd'
super(DriveGroupSpec, self).__init__('osd', service_id=service_id,
@ -186,7 +187,8 @@ class DriveGroupSpec(ServiceSpec):
config=config,
unmanaged=unmanaged,
preview_only=preview_only,
extra_container_args=extra_container_args)
extra_container_args=extra_container_args,
custom_configs=custom_configs)
#: A :class:`ceph.deployment.drive_group.DeviceSelection`
self.data_devices = data_devices

View File

@ -1,4 +1,5 @@
import fnmatch
import os
import re
import enum
from collections import OrderedDict
@ -420,6 +421,58 @@ tPlacementSpec(hostname='host2', network='', name='')])
_service_spec_from_json_validate = True
class CustomConfig:
"""
Class to specify custom config files to be mounted in daemon's container
"""
_fields = ['content', 'mount_path']
def __init__(self, content: str, mount_path: str) -> None:
self.content: str = content
self.mount_path: str = mount_path
self.validate()
def to_json(self) -> Dict[str, Any]:
return {
'content': self.content,
'mount_path': self.mount_path,
}
@classmethod
def from_json(cls, data: Dict[str, Any]) -> "CustomConfig":
for k in cls._fields:
if k not in data:
raise SpecValidationError(f'CustomConfig must have "{k}" field')
for k in data.keys():
if k not in cls._fields:
raise SpecValidationError(f'CustomConfig got unknown field "{k}"')
return cls(**data)
@property
def filename(self) -> str:
return os.path.basename(self.mount_path)
def __eq__(self, other: Any) -> bool:
if isinstance(other, CustomConfig):
return (
self.content == other.content
and self.mount_path == other.mount_path
)
return NotImplemented
def __repr__(self) -> str:
return f'CustomConfig({self.mount_path})'
def validate(self) -> None:
if not isinstance(self.content, str):
raise SpecValidationError(
f'CustomConfig content must be a string. Got {type(self.content)}')
if not isinstance(self.mount_path, str):
raise SpecValidationError(
f'CustomConfig mount_path must be a string. Got {type(self.mount_path)}')
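# --- Illustration (not part of this diff): typical CustomConfig usage. ---
# Assumes the ceph python-common package is importable.
from ceph.deployment.service_spec import CustomConfig
from ceph.deployment.hostspec import SpecValidationError

cc = CustomConfig(content='setting1 = value1\n', mount_path='/etc/example.conf')
assert cc.filename == 'example.conf'
assert CustomConfig.from_json(cc.to_json()) == cc
try:
    CustomConfig.from_json({'mount_path': '/etc/only-path.conf'})  # missing "content"
except SpecValidationError as err:
    print(err)  # CustomConfig must have "content" field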
@contextmanager
def service_spec_allow_invalid_from_json() -> Iterator[None]:
"""
@ -506,6 +559,7 @@ class ServiceSpec(object):
preview_only: bool = False,
networks: Optional[List[str]] = None,
extra_container_args: Optional[List[str]] = None,
custom_configs: Optional[List[CustomConfig]] = None,
):
#: See :ref:`orchestrator-cli-placement-spec`.
@ -545,6 +599,7 @@ class ServiceSpec(object):
self.config = {k.replace(' ', '_'): v for k, v in config.items()}
self.extra_container_args: Optional[List[str]] = extra_container_args
self.custom_configs: Optional[List[CustomConfig]] = custom_configs
@classmethod
@handle_type_error
@ -632,6 +687,8 @@ class ServiceSpec(object):
for k, v in json_spec.items():
if k == 'placement':
v = PlacementSpec.from_json(v)
if k == 'custom_configs':
v = [CustomConfig.from_json(c) for c in v]
if k == 'spec':
args.update(v)
continue
@ -670,6 +727,8 @@ class ServiceSpec(object):
ret['networks'] = self.networks
if self.extra_container_args:
ret['extra_container_args'] = self.extra_container_args
if self.custom_configs:
ret['custom_configs'] = [c.to_json() for c in self.custom_configs]
c = {}
for key, val in sorted(self.__dict__.items(), key=lambda tpl: tpl[0]):
@ -746,12 +805,14 @@ class NFSServiceSpec(ServiceSpec):
networks: Optional[List[str]] = None,
port: Optional[int] = None,
extra_container_args: Optional[List[str]] = None,
custom_configs: Optional[List[CustomConfig]] = None,
):
assert service_type == 'nfs'
super(NFSServiceSpec, self).__init__(
'nfs', service_id=service_id,
placement=placement, unmanaged=unmanaged, preview_only=preview_only,
config=config, networks=networks, extra_container_args=extra_container_args)
config=config, networks=networks, extra_container_args=extra_container_args,
custom_configs=custom_configs)
self.port = port
@ -809,6 +870,7 @@ class RGWSpec(ServiceSpec):
networks: Optional[List[str]] = None,
subcluster: Optional[str] = None, # legacy, only for from_json on upgrade
extra_container_args: Optional[List[str]] = None,
custom_configs: Optional[List[CustomConfig]] = None,
):
assert service_type == 'rgw', service_type
@ -820,7 +882,7 @@ class RGWSpec(ServiceSpec):
'rgw', service_id=service_id,
placement=placement, unmanaged=unmanaged,
preview_only=preview_only, config=config, networks=networks,
extra_container_args=extra_container_args)
extra_container_args=extra_container_args, custom_configs=custom_configs)
#: The RGW realm associated with this service. Needs to be manually created
self.rgw_realm: Optional[str] = rgw_realm
@ -878,13 +940,15 @@ class IscsiServiceSpec(ServiceSpec):
config: Optional[Dict[str, str]] = None,
networks: Optional[List[str]] = None,
extra_container_args: Optional[List[str]] = None,
custom_configs: Optional[List[CustomConfig]] = None,
):
assert service_type == 'iscsi'
super(IscsiServiceSpec, self).__init__('iscsi', service_id=service_id,
placement=placement, unmanaged=unmanaged,
preview_only=preview_only,
config=config, networks=networks,
extra_container_args=extra_container_args)
extra_container_args=extra_container_args,
custom_configs=custom_configs)
#: RADOS pool where ceph-iscsi config data is stored.
self.pool = pool
@ -947,13 +1011,15 @@ class IngressSpec(ServiceSpec):
unmanaged: bool = False,
ssl: bool = False,
extra_container_args: Optional[List[str]] = None,
custom_configs: Optional[List[CustomConfig]] = None,
):
assert service_type == 'ingress'
super(IngressSpec, self).__init__(
'ingress', service_id=service_id,
placement=placement, config=config,
networks=networks,
extra_container_args=extra_container_args
extra_container_args=extra_container_args,
custom_configs=custom_configs
)
self.backend_service = backend_service
self.frontend_port = frontend_port
@ -1076,6 +1142,7 @@ class MonitoringSpec(ServiceSpec):
preview_only: bool = False,
port: Optional[int] = None,
extra_container_args: Optional[List[str]] = None,
custom_configs: Optional[List[CustomConfig]] = None,
):
assert service_type in ['grafana', 'node-exporter', 'prometheus', 'alertmanager',
'loki', 'promtail']
@ -1084,7 +1151,8 @@ class MonitoringSpec(ServiceSpec):
service_type, service_id,
placement=placement, unmanaged=unmanaged,
preview_only=preview_only, config=config,
networks=networks, extra_container_args=extra_container_args)
networks=networks, extra_container_args=extra_container_args,
custom_configs=custom_configs)
self.service_type = service_type
self.port = port
@ -1120,13 +1188,14 @@ class AlertManagerSpec(MonitoringSpec):
port: Optional[int] = None,
secure: bool = False,
extra_container_args: Optional[List[str]] = None,
custom_configs: Optional[List[CustomConfig]] = None,
):
assert service_type == 'alertmanager'
super(AlertManagerSpec, self).__init__(
'alertmanager', service_id=service_id,
placement=placement, unmanaged=unmanaged,
preview_only=preview_only, config=config, networks=networks, port=port,
extra_container_args=extra_container_args)
extra_container_args=extra_container_args, custom_configs=custom_configs)
# Custom configuration.
#
@ -1171,13 +1240,14 @@ class GrafanaSpec(MonitoringSpec):
port: Optional[int] = None,
initial_admin_password: Optional[str] = None,
extra_container_args: Optional[List[str]] = None,
custom_configs: Optional[List[CustomConfig]] = None,
):
assert service_type == 'grafana'
super(GrafanaSpec, self).__init__(
'grafana', service_id=service_id,
placement=placement, unmanaged=unmanaged,
preview_only=preview_only, config=config, networks=networks, port=port,
extra_container_args=extra_container_args)
extra_container_args=extra_container_args, custom_configs=custom_configs)
self.initial_admin_password = initial_admin_password
@ -1225,6 +1295,7 @@ class SNMPGatewaySpec(ServiceSpec):
preview_only: bool = False,
port: Optional[int] = None,
extra_container_args: Optional[List[str]] = None,
custom_configs: Optional[List[CustomConfig]] = None,
):
assert service_type == 'snmp-gateway'
@ -1233,7 +1304,8 @@ class SNMPGatewaySpec(ServiceSpec):
placement=placement,
unmanaged=unmanaged,
preview_only=preview_only,
extra_container_args=extra_container_args)
extra_container_args=extra_container_args,
custom_configs=custom_configs)
self.service_type = service_type
self.snmp_version = snmp_version
@ -1344,6 +1416,7 @@ class MDSSpec(ServiceSpec):
unmanaged: bool = False,
preview_only: bool = False,
extra_container_args: Optional[List[str]] = None,
custom_configs: Optional[List[CustomConfig]] = None,
):
assert service_type == 'mds'
super(MDSSpec, self).__init__('mds', service_id=service_id,
@ -1351,7 +1424,8 @@ class MDSSpec(ServiceSpec):
config=config,
unmanaged=unmanaged,
preview_only=preview_only,
extra_container_args=extra_container_args)
extra_container_args=extra_container_args,
custom_configs=custom_configs)
def validate(self) -> None:
super(MDSSpec, self).validate()