Merge PR #34672 into master

* refs/pull/34672/head:
	qa/tasks/cephfs: Enable multiple exports tests
	mgr/nfs: Instead of 'auth del' use 'auth rm'
	qa/tasks/cephfs: Don't enable cephadm in TestNFS
	qa/tasks/cephfs: Add tests for nfs exports
	mgr/volumes/nfs: Fix idempotency of cluster and export commands
	mgr/volumes/nfs: Fix incorrect read only access_type value
	mgr/fs/nfs: Use check_mon_command() instead of mon_command()
	qa/cephfs: Add tests for nfs
	mgr/volumes/nfs: Remove type option from export create interface
	vstart: Instead of CACHEINODE use MDCACHE
	mgr/volumes: Rearrange nfs export interface
	mgr/volumes/nfs: Delete common config object on cluster deletion
	mgr/volumes/nfs: Delete all exports on cluster deletion
	mgr/volumes: Make nfs create export interface idempotent
	vstart: Add watch url for conf-nfs object
	mgr/volumes/nfs: Delete user on removing export
	mgr/volumes: Create user for given path and fs
	vstart: Ensure cephadm and NFS does not conflict
	vstart: Update details about ganesha packages
	mgr/volumes/nfs: Add delete cephfs export command
	mgr/volumes/nfs: Add RADOS notify for common config object
	mgr/volumes/nfs: Pass cluster_id directly to NFSCluster {create, update, delete} methods
	mgr/volumes: Add nfs cluster delete interface
	mgr/volumes: Add nfs cluster update interface
	vstart: Enable test_orchestrator in start_ganesha()
	mgr/volumes: Add placement option to create nfs cluster interface
	mgr/volumes/nfs: Change common ganesha config object name to 'conf-nfs.ganesha-<cluster_id>'
	mgr/volumes/nfs: Call orch nfs apply

Reviewed-by: Jeff Layton <jlayton@redhat.com>
Reviewed-by: Sebastian Wagner <swagner@suse.com>
Reviewed-by: Ramana Raja <rraja@redhat.com>
Reviewed-by: Patrick Donnelly <pdonnell@redhat.com>
Committed by Patrick Donnelly on 2020-06-01 11:21:34 -07:00 (commit 9442abd152)
6 changed files with 407 additions and 116 deletions


@@ -17,29 +17,52 @@ Create NFS Ganesha Cluster
.. code:: bash
$ ceph nfs cluster create <type=cephfs> [--size=1] <clusterid>
$ ceph nfs cluster create <type=cephfs> <clusterid> [<placement>]
This creates a common recovery pool for all Ganesha daemons, a new user based on
the cluster_id, and a common ganesha config RADOS object.
Here size denotes the number of ganesha daemons within a cluster and type is
export type. Currently only CephFS is supported.
Here type is the export type and placement specifies the cluster size and hosts.
For more details on the placement specification refer to the `orchestrator doc
<https://docs.ceph.com/docs/master/mgr/orchestrator/#placement-specification>`_.
Currently only the CephFS export type is supported.
.. note:: This does not set up the ganesha recovery database or start the daemons.
These steps need to be done manually if vstart was not used to create the
cluster. Please refer to the `ganesha-rados-grace doc
<https://github.com/nfs-ganesha/nfs-ganesha/blob/next/src/doc/man/ganesha-rados-grace.rst>`_
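For example, a hypothetical two-daemon cluster spread over two hosts (``mynfs``,
``host1`` and ``host2`` are placeholders; the placement string format is assumed
from the orchestrator doc linked above):
.. code:: bash
    $ ceph nfs cluster create cephfs mynfs "2 host1 host2"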
Update NFS Ganesha Cluster
==========================
.. code:: bash
$ ceph nfs cluster update <clusterid> <placement>
This updates the deployed cluster according to the placement value.
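For instance, scaling the hypothetical ``mynfs`` cluster above to three daemons:
.. code:: bash
    $ ceph nfs cluster update mynfs "3 host1 host2 host3"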
Delete NFS Ganesha Cluster
==========================
.. code:: bash
$ ceph nfs cluster delete <clusterid>
This deletes the deployed cluster.
Create CephFS Export
====================
.. code:: bash
$ ceph nfs export create <type=cephfs> <fsname> <binding> <clusterid> [--readonly] [--path=/path/in/cephfs]
$ ceph nfs export create cephfs <fsname> <clusterid> <binding> [--readonly] [--path=/path/in/cephfs]
It creates export rados objects containing the export block. Here binding is
the pseudo root name and type is export type. Currently only CephFS is
supported.
the pseudo root name and type is export type.
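As an illustration, a read-only export of a subtree, reusing the hypothetical
``mynfs`` cluster (``myfs`` and the paths are placeholders):
.. code:: bash
    $ ceph nfs export create cephfs myfs mynfs /cephfs --readonly --path=/data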
Delete CephFS Export
====================
.. code:: bash
$ ceph nfs export delete <clusterid> <binding>
It deletes an export from the cluster based on the pseudo root name (binding).
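For example, removing the hypothetical export bound at pseudo root ``/cephfs``:
.. code:: bash
    $ ceph nfs export delete mynfs /cephfs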
Configuring NFS-Ganesha to export CephFS with vstart
====================================================


@@ -0,0 +1,17 @@
roles:
- - host.a
- osd.0
- osd.1
- osd.2
- mon.a
- mgr.a
- client.0
tasks:
- install:
- cephadm:
- cephadm.shell:
host.a:
- ceph orch apply mds 1
- cephfs_test_runner:
modules:
- tasks.cephfs.test_nfs

qa/tasks/cephfs/test_nfs.py (new file, 120 lines)

@@ -0,0 +1,120 @@
import os
import json
import time
import errno
import logging
from io import BytesIO
from tasks.mgr.mgr_test_case import MgrTestCase
from teuthology.exceptions import CommandFailedError
log = logging.getLogger(__name__)
class TestNFS(MgrTestCase):
def _cmd(self, *args):
return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)
def _nfs_cmd(self, *args):
return self._cmd("nfs", *args)
def _orch_cmd(self, *args):
return self._cmd("orch", *args)
def _sys_cmd(self, cmd):
cmd[0:0] = ['sudo']
ret = self.ctx.cluster.run(args=cmd, check_status=False, stdout=BytesIO(), stderr=BytesIO())
stdout = ret[0].stdout
if stdout:
return stdout.getvalue()
def setUp(self):
super(TestNFS, self).setUp()
self.cluster_id = "test"
self.export_type = "cephfs"
self.pseudo_path = "/cephfs"
self.path = "/"
self.fs_name = "nfs-cephfs"
self.expected_name = "nfs.ganesha-test"
def _check_port_status(self):
log.info("NETSTAT")
self._sys_cmd(['netstat', '-tnlp'])
def _check_nfs_server_status(self):
res = self._sys_cmd(['systemctl', 'status', 'nfs-server'])
if isinstance(res, bytes) and b'Active: active' in res:
self._disable_nfs()
def _disable_nfs(self):
log.info("Disabling NFS")
self._sys_cmd(['systemctl', 'disable', 'nfs-server', '--now'])
def _check_nfs_status(self):
return self._orch_cmd('ls', 'nfs')
def _test_create_cluster(self):
self._check_nfs_server_status()
self._nfs_cmd('cluster', 'create', self.export_type, self.cluster_id)
time.sleep(8)
orch_output = self._check_nfs_status()
expected_status = '1/1'
if self.expected_name not in orch_output or expected_status not in orch_output:
raise RuntimeError("NFS Ganesha cluster could not be deployed")
def _test_delete_cluster(self):
self._nfs_cmd('cluster', 'delete', self.cluster_id)
time.sleep(8)
orch_output = self._check_nfs_status()
self.assertEqual("No services reported\n", orch_output)
def _create_export(self, export_id, create_fs=False, extra_cmd=None):
if create_fs:
self._cmd('fs', 'volume', 'create', self.fs_name)
export_cmd = ['nfs', 'export', 'create', 'cephfs', self.fs_name, self.cluster_id]
if isinstance(extra_cmd, list):
export_cmd.extend(extra_cmd)
else:
export_cmd.append(self.pseudo_path)
self._cmd(*export_cmd)
res = self._sys_cmd(['rados', '-p', 'nfs-ganesha', '-N', self.cluster_id, 'get', f'export-{export_id}', '-'])
if res == b'':
raise RuntimeError("Export cannot be created")
def _create_default_export(self):
self._test_create_cluster()
self._create_export(export_id='1', create_fs=True)
def _delete_export(self):
self._nfs_cmd('export', 'delete', self.cluster_id, self.pseudo_path)
def _check_export_obj_deleted(self, conf_obj=False):
rados_obj_ls = self._sys_cmd(['rados', '-p', 'nfs-ganesha', '-N', self.cluster_id, 'ls'])
if b'export-' in rados_obj_ls or (conf_obj and b'conf-nfs' in rados_obj_ls):
raise RuntimeError("Delete export failed")
def test_create_and_delete_cluster(self):
self._test_create_cluster()
self._test_delete_cluster()
def test_export_create_and_delete(self):
self._create_default_export()
self._delete_export()
self._check_export_obj_deleted()
self._test_delete_cluster()
def test_create_multiple_exports(self):
#Export-1 with default values
self._create_default_export()
#Export-2 with r only
self._create_export(export_id='2', extra_cmd=[self.pseudo_path+'1', '--readonly'])
#Export-3 for subvolume with r only
self._cmd('fs', 'subvolume', 'create', self.fs_name, 'sub_vol')
fs_path = self._cmd('fs', 'subvolume', 'getpath', self.fs_name, 'sub_vol')
self._create_export(export_id='3', extra_cmd=[self.pseudo_path+'2', '--readonly', fs_path.strip()])
#Export-4 for subvolume
self._create_export(export_id='4', extra_cmd=[self.pseudo_path+'3', fs_path.strip()])
self._test_delete_cluster()
self._check_export_obj_deleted(conf_obj=True)
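The test asserts on raw RADOS state rather than command output; a rough manual
equivalent of its checks, assuming the cluster_id ``test`` from setUp:
    # List export and config objects in the cluster's namespace (_check_export_obj_deleted)
    $ rados -p nfs-ganesha -N test ls
    # Dump the first export block (_create_export)
    $ rados -p nfs-ganesha -N test get export-1 -
    # Confirm the deployed service is up (_check_nfs_status)
    $ ceph orch ls nfs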


@@ -2,6 +2,8 @@ import json
import errno
import logging
from ceph.deployment.service_spec import NFSServiceSpec, PlacementSpec
import cephfs
import orchestrator
from .fs_util import create_pool
@@ -200,30 +202,33 @@ class FSExport(object):
fs_map = self.mgr.get('fs_map')
return fs_name in [fs['mdsmap']['fs_name'] for fs in fs_map['filesystems']]
def check_pseudo_path(self, pseudo_path):
def _fetch_export(self, pseudo_path):
for ex in self.exports[self.rados_namespace]:
if ex.pseudo == pseudo_path:
return True
return False
return ex
def _create_user_key(self, entity):
osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data=a'.format(
self.rados_pool, self.rados_namespace)
ret, out, err = self.mgr.mon_command({
def _create_user_key(self, entity, path, fs_name):
osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
self.rados_pool, self.rados_namespace, fs_name)
ret, out, err = self.mgr.check_mon_command({
'prefix': 'auth get-or-create',
'entity': 'client.{}'.format(entity),
'caps' : ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow rw path=/'],
'caps' : ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow rw path={}'.format(path)],
'format': 'json',
})
if ret != 0:
return ret, err
json_res = json.loads(out)
log.info("Export user is {}".format(json_res[0]['entity']))
log.info("Export user created is {}".format(json_res[0]['entity']))
return json_res[0]['entity'], json_res[0]['key']
def _delete_user(self, entity):
self.mgr.check_mon_command({
'prefix': 'auth rm',
'entity': 'client.{}'.format(entity),
})
log.info(f"Export user deleted is {entity}")
def format_path(self, path):
if path is not None:
path = path.strip()
@@ -248,14 +253,29 @@ class FSExport(object):
ioctx.set_namespace(self.rados_namespace)
if append:
ioctx.append(obj, raw_config.encode('utf-8'))
ioctx.notify(obj)
else:
ioctx.write_full(obj, raw_config.encode('utf-8'))
log.debug(
"write configuration into rados object %s/%s/%s:\n%s",
self.rados_pool, self.rados_namespace, obj, raw_config)
def _update_common_conf(self, ex_id):
common_conf = 'conf-nfs'
def _delete_export_url(self, obj, ex_id):
export_name = 'export-{}'.format(ex_id)
with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
if self.rados_namespace:
ioctx.set_namespace(self.rados_namespace)
export_urls = ioctx.read(obj)
url = '%url "{}"\n\n'.format(self.make_rados_url(export_name))
export_urls = export_urls.replace(url.encode('utf-8'), b'')
ioctx.remove_object(export_name)
ioctx.write_full(obj, export_urls)
ioctx.notify(obj)
log.debug("Export deleted: {}".format(url))
def _update_common_conf(self, cluster_id, ex_id):
common_conf = 'conf-nfs.ganesha-{}'.format(cluster_id)
conf_blocks = {
'block_name': '%url',
'value': self.make_rados_url(
@@ -267,52 +287,82 @@ class FSExport(object):
self.exports[self.rados_namespace].append(export)
conf_block = export.to_export_block()
self._write_raw_config(conf_block, "export-{}".format(export.export_id))
self._update_common_conf(export.export_id)
self._update_common_conf(export.cluster_id, export.export_id)
def create_export(self, export_type, fs_name, pseudo_path, read_only, path, cluster_id):
if export_type != 'cephfs':
return -errno.EINVAL,"", f"Invalid export type: {export_type}"
#TODO Check if valid cluster
if cluster_id not in self.exports:
self.exports[cluster_id] = []
def create_export(self, fs_name, cluster_id, pseudo_path, read_only, path):
try:
if not self.check_fs(fs_name):
return -errno.EINVAL,"", "Invalid CephFS name"
self.rados_namespace = cluster_id
if not self.check_fs(fs_name) or self.check_pseudo_path(pseudo_path):
return -errno.EINVAL,"", "Invalid CephFS name or export already exists"
#TODO Check if valid cluster
if cluster_id not in self.exports:
self.exports[cluster_id] = []
user_id, key = self._create_user_key(cluster_id)
if isinstance(user_id, int):
return user_id, "", key
access_type = "RW"
if read_only:
access_type = "R"
self.rados_namespace = cluster_id
ex_dict = {
'path': self.format_path(path),
'pseudo': self.format_path(pseudo_path),
'cluster_id': cluster_id,
'access_type': access_type,
'fsal': {"name": "CEPH", "user_id":cluster_id, "fs_name": fs_name, "sec_label_xattr": ""},
'clients': []
}
if not self._fetch_export(pseudo_path):
ex_id = self._gen_export_id()
user_id = f"{cluster_id}{ex_id}"
user_out, key = self._create_user_key(user_id, path, fs_name)
access_type = "RW"
if read_only:
access_type = "RO"
ex_dict = {
'path': self.format_path(path),
'pseudo': self.format_path(pseudo_path),
'cluster_id': cluster_id,
'access_type': access_type,
'fsal': {"name": "CEPH", "user_id": user_id,
"fs_name": fs_name, "sec_label_xattr": ""},
'clients': []
}
export = Export.from_dict(ex_id, ex_dict)
export.fsal.cephx_key = key
self._save_export(export)
result = {
"bind": pseudo_path,
"fs": fs_name,
"path": path,
"cluster": cluster_id,
"mode": access_type,
}
return (0, json.dumps(result, indent=4), '')
return 0, "", "Export already exists"
except Exception as e:
log.warning("Failed to create exports")
return -errno.EINVAL, "", str(e)
ex_id = self._gen_export_id()
export = Export.from_dict(ex_id, ex_dict)
export.fsal.cephx_key = key
self._save_export(export)
def delete_export(self, cluster_id, pseudo_path, export_obj=None):
try:
self.rados_namespace = cluster_id
if export_obj:
export = export_obj
else:
export = self._fetch_export(pseudo_path)
result = {
"bind": pseudo_path,
"fs": fs_name,
"path": path,
"cluster": cluster_id,
"mode": access_type,
}
if export:
common_conf = 'conf-nfs.ganesha-{}'.format(cluster_id)
self._delete_export_url(common_conf, export.export_id)
self.exports[cluster_id].remove(export)
self._delete_user(export.fsal.user_id)
return 0, "Successfully deleted export", ""
return 0, "", "Export does not exist"
except KeyError:
return -errno.EINVAL, "", "Cluster does not exist"
except Exception as e:
log.warning("Failed to delete exports")
return -errno.EINVAL, "", str(e)
return (0, json.dumps(result, indent=4), '')
def delete_export(self, ex_id):
raise NotImplementedError()
def delete_all_exports(self, cluster_id):
try:
export_list = list(self.exports[cluster_id])
for export in export_list:
ret, out, err = self.delete_export(cluster_id, None, export)
if ret != 0:
raise Exception(f"Failed to delete exports: {err} and {ret}")
log.info(f"All exports successfully deleted for cluster id: {cluster_id}")
except KeyError:
log.info("No exports to delete")
def make_rados_url(self, obj):
if self.rados_namespace:
@@ -320,50 +370,107 @@ class FSExport(object):
return "rados://{}/{}".format(self.rados_pool, obj)
class NFSCluster:
def __init__(self, mgr, cluster_id):
self.cluster_id = "ganesha-%s" % cluster_id
def __init__(self, mgr):
self.pool_name = 'nfs-ganesha'
self.pool_ns = cluster_id
self.pool_ns = ''
self.mgr = mgr
def create_empty_rados_obj(self):
common_conf = 'conf-nfs'
common_conf = self._get_common_conf_obj_name()
result = ''
with self.mgr.rados.open_ioctx(self.pool_name) as ioctx:
if self.pool_ns:
ioctx.set_namespace(self.pool_ns)
ioctx.write_full(common_conf, result.encode('utf-8'))
log.debug(
"write configuration into rados object %s/%s/nfs-conf\n",
self.pool_name, self.pool_ns)
"write configuration into rados object %s/%s/%s\n",
self.pool_name, self.pool_ns, common_conf)
def create_nfs_cluster(self, export_type, size):
def delete_common_config_obj(self):
common_conf = self._get_common_conf_obj_name()
with self.mgr.rados.open_ioctx(self.pool_name) as ioctx:
if self.pool_ns:
ioctx.set_namespace(self.pool_ns)
ioctx.remove_object(common_conf)
log.info(f"Deleted object:{common_conf}")
def available_clusters(self):
completion = self.mgr.describe_service(service_type='nfs')
self.mgr._orchestrator_wait([completion])
orchestrator.raise_if_exception(completion)
return [cluster.spec.service_id for cluster in completion.result]
def _set_cluster_id(self, cluster_id):
self.cluster_id = "ganesha-%s" % cluster_id
def _set_pool_namespace(self, cluster_id):
self.pool_ns = cluster_id
def _get_common_conf_obj_name(self):
return 'conf-nfs.{}'.format(self.cluster_id)
def _call_orch_apply_nfs(self, placement):
spec = NFSServiceSpec(service_type='nfs', service_id=self.cluster_id,
pool=self.pool_name, namespace=self.pool_ns,
placement=PlacementSpec.from_string(placement))
completion = self.mgr.apply_nfs(spec)
self.mgr._orchestrator_wait([completion])
orchestrator.raise_if_exception(completion)
def create_nfs_cluster(self, export_type, cluster_id, placement):
if export_type != 'cephfs':
return -errno.EINVAL,"", f"Invalid export type: {export_type}"
return -errno.EINVAL, "", f"Invalid export type: {export_type}"
try:
pool_list = [p['pool_name'] for p in self.mgr.get_osdmap().dump().get('pools', [])]
pool_list = [p['pool_name'] for p in self.mgr.get_osdmap().dump().get('pools', [])]
client = 'client.%s' % self.cluster_id
if self.pool_name not in pool_list:
r, out, err = create_pool(self.mgr, self.pool_name)
if r != 0:
return r, out, err
log.info(f"Pool Status: {out}")
if self.pool_name not in pool_list:
r, out, err = create_pool(self.mgr, self.pool_name)
if r != 0:
return r, out, err
log.info("{}".format(out))
self.mgr.check_mon_command({'prefix': 'osd pool application enable',
'pool': self.pool_name, 'app': 'nfs'})
command = {'prefix': 'osd pool application enable', 'pool': self.pool_name, 'app': 'nfs'}
r, out, err = self.mgr.mon_command(command)
self._set_pool_namespace(cluster_id)
self._set_cluster_id(cluster_id)
self.create_empty_rados_obj()
if r != 0:
return r, out, err
if self.cluster_id not in self.available_clusters():
self._call_orch_apply_nfs(placement)
return 0, "NFS Cluster Created Successfully", ""
return 0, "", f"{self.cluster_id} cluster already exists"
except Exception as e:
log.warning("NFS Cluster could not be created")
return -errno.EINVAL, "", str(e)
self.create_empty_rados_obj()
#TODO Check if cluster exists
#TODO Call Orchestrator to deploy cluster
def update_nfs_cluster(self, cluster_id, placement):
try:
self._set_pool_namespace(cluster_id)
self._set_cluster_id(cluster_id)
if self.cluster_id in self.available_clusters():
self._call_orch_apply_nfs(placement)
return 0, "NFS Cluster Updated Successfully", ""
return -errno.EINVAL, "", "Cluster does not exist"
except Exception as e:
log.warning("NFS Cluster could not be updated")
return -errno.EINVAL, "", str(e)
return 0, "", "NFS Cluster Created Successfully"
def delete_nfs_cluster(self, cluster_id):
try:
self._set_cluster_id(cluster_id)
cluster_list = self.available_clusters()
def update_nfs_cluster(self, size):
raise NotImplementedError()
def delete_nfs_cluster(self):
raise NotImplementedError()
if self.cluster_id in self.available_clusters():
self.mgr.fs_export.delete_all_exports(cluster_id)
completion = self.mgr.remove_service('nfs.' + self.cluster_id)
self.mgr._orchestrator_wait([completion])
orchestrator.raise_if_exception(completion)
if len(cluster_list) == 1:
self.delete_common_config_obj()
return 0, "NFS Cluster Deleted Successfully", ""
return 0, "", "Cluster does not exist"
except Exception as e:
log.warning("Failed to delete NFS Cluster")
return -errno.EINVAL, "", str(e)


@@ -223,19 +223,19 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
'perm': 'r'
},
{
'cmd': 'nfs export create '
'name=type,type=CephString '
'cmd': 'nfs export create cephfs '
'name=fsname,type=CephString '
'name=binding,type=CephString '
'name=attach,type=CephString '
'name=binding,type=CephString '
'name=readonly,type=CephBool,req=false '
'name=path,type=CephString,req=false ',
'desc': "Create a cephfs export",
'perm': 'rw'
},
{
'cmd': 'fs nfs export delete '
'name=export_id,type=CephInt,req=true ',
'cmd': 'nfs export delete '
'name=attach,type=CephString '
'name=binding,type=CephString ',
'desc': "Delete a cephfs export",
'perm': 'rw'
},
@@ -243,10 +243,23 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
'cmd': 'nfs cluster create '
'name=type,type=CephString '
'name=clusterid,type=CephString '
'name=size,type=CephInt,req=false ',
'name=placement,type=CephString,req=false ',
'desc': "Create an NFS Cluster",
'perm': 'rw'
},
{
'cmd': 'nfs cluster update '
'name=clusterid,type=CephString '
'name=placement,type=CephString ',
'desc': "Updates an NFS Cluster",
'perm': 'rw'
},
{
'cmd': 'nfs cluster delete '
'name=clusterid,type=CephString ',
'desc': "Deletes an NFS Cluster",
'perm': 'rw'
},
# volume ls [recursive]
# subvolume ls <volume>
# volume authorize/deauthorize
@@ -268,6 +281,7 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
super(Module, self).__init__(*args, **kwargs)
self.vc = VolumeClient(self)
self.fs_export = FSExport(self)
self.nfs = NFSCluster(self)
def __del__(self):
self.vc.shutdown()
@@ -417,16 +431,20 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
return self.vc.clone_cancel(
vol_name=cmd['vol_name'], clone_name=cmd['clone_name'], group_name=cmd.get('group_name', None))
def _cmd_nfs_export_create(self, inbuf, cmd):
def _cmd_nfs_export_create_cephfs(self, inbuf, cmd):
#TODO Extend export creation for rgw.
return self.fs_export.create_export(export_type=cmd['type'], fs_name=cmd['fsname'],
pseudo_path=cmd['binding'], read_only=cmd.get('readonly', False),
path=cmd.get('path', '/'), cluster_id=cmd.get('attach'))
return self.fs_export.create_export(fs_name=cmd['fsname'], cluster_id=cmd['attach'],
pseudo_path=cmd['binding'], read_only=cmd.get('readonly', False), path=cmd.get('path', '/'))
def _cmd_fs_nfs_export_delete(self, inbuf, cmd):
return self.fs_export.delete_export(cmd['export_id'])
def _cmd_nfs_export_delete(self, inbuf, cmd):
return self.fs_export.delete_export(cluster_id=cmd['attach'], pseudo_path=cmd['binding'])
def _cmd_nfs_cluster_create(self, inbuf, cmd):
#TODO add placement option
nfs_cluster_obj = NFSCluster(self, cmd['clusterid'])
return nfs_cluster_obj.create_nfs_cluster(export_type=cmd['type'], size=cmd.get('size', 1))
return self.nfs.create_nfs_cluster(cluster_id=cmd['clusterid'], export_type=cmd['type'],
placement=cmd.get('placement', None))
def _cmd_nfs_cluster_update(self, inbuf, cmd):
return self.nfs.update_nfs_cluster(cluster_id=cmd['clusterid'], placement=cmd['placement'])
def _cmd_nfs_cluster_delete(self, inbuf, cmd):
return self.nfs.delete_nfs_cluster(cluster_id=cmd['clusterid'])
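On success, create_export returns the result dict above serialized with
json.dumps(..., indent=4); a sketch of the expected shape for the hypothetical
export from the docs (values depend on the arguments given):
    $ ceph nfs export create cephfs myfs mynfs /cephfs
    {
        "bind": "/cephfs",
        "fs": "myfs",
        "path": "/",
        "cluster": "mynfs",
        "mode": "RW"
    }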


@@ -1064,11 +1064,12 @@ EOF
}
# Ganesha Daemons requires nfs-ganesha nfs-ganesha-ceph nfs-ganesha-rados-grace
# (version 2.7.6-2 and above) packages installed. On Fedora>=30 these packages
# can be installed directly with 'dnf'. For CentOS>=8 the packages need to be
# downloaded first from https://download.nfs-ganesha.org/2.7/2.7.6/CentOS/ and
# then install it. Similarly for Ubuntu 16.04 follow the instructions on
# https://launchpad.net/~nfs-ganesha/+archive/ubuntu/nfs-ganesha-2.7
# nfs-ganesha-rados-urls (version 2.8.3 and above) packages installed. On
# Fedora>=31 these packages can be installed directly with 'dnf'. For CentOS>=8
# the packages need to be downloaded first from
# https://download.nfs-ganesha.org/2.8/2.8.3/CentOS and then installed.
# Similarly for Ubuntu 16.04 follow the instructions on
# https://launchpad.net/~nfs-ganesha/+archive/ubuntu/nfs-ganesha-2.8
start_ganesha() {
GANESHA_PORT=$(($CEPH_PORT + 4000))
@@ -1084,6 +1085,7 @@ start_ganesha() {
test_user="ganesha-$name"
pool_name="nfs-ganesha"
namespace=$name
url="rados://$pool_name/$namespace/conf-nfs.$test_user"
prun rm -rf $ganesha_dir
prun mkdir -p $ganesha_dir
@@ -1092,6 +1094,9 @@ start_ganesha() {
osd "allow rw pool=$pool_name namespace=$namespace, allow rw tag cephfs data=a" \
mds "allow rw path=/" \
>> "$keyring_fn"
ceph_adm mgr module enable test_orchestrator
ceph_adm orch set backend test_orchestrator
prun ceph_adm nfs cluster create cephfs $name
echo "NFS_CORE_PARAM {
@@ -1101,7 +1106,7 @@ start_ganesha() {
NFS_Port = $port;
}
CACHEINODE {
MDCACHE {
Dir_Chunk = 0;
NParts = 1;
Cache_Size = 1;
@@ -1112,7 +1117,7 @@ start_ganesha() {
Minor_Versions = 1, 2;
}
%url rados://$pool_name/$namespace/conf-nfs
%url $url
RADOS_KV {
pool = $pool_name;
@@ -1123,6 +1128,7 @@ start_ganesha() {
RADOS_URLS {
Userid = $test_user;
watch_url = \"$url\";
}" > "$ganesha_dir/ganesha.conf"
wconf <<EOF
[ganesha.$name]
@@ -1133,7 +1139,7 @@ start_ganesha() {
pid file = $ganesha_dir/ganesha.pid
EOF
prun ceph_adm nfs export create cephfs "a" "/cephfs" $name
prun ceph_adm nfs export create cephfs "a" $name "/cephfs"
prun ganesha-rados-grace -p $pool_name -n $namespace add $name
prun ganesha-rados-grace -p $pool_name -n $namespace
@@ -1363,7 +1369,7 @@ EOF
fi
# Ganesha Daemons
if [ $GANESHA_DAEMON_NUM -gt 0 ]; then
if [ $GANESHA_DAEMON_NUM -gt 0 ] && [ "$cephadm" -eq 0 ]; then
start_ganesha
fi
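Ganesha is therefore only started when GANESHA_DAEMON_NUM is non-zero and
cephadm mode is off; a hypothetical local run (the -n/-d flags are assumed from
common vstart usage: new cluster plus debug):
    $ GANESHA_DAEMON_NUM=1 ../src/vstart.sh -n -d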