From 1e1a1b0ae80646c06855e94a21b8d5c83a0e3955 Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Mon, 20 Apr 2020 11:42:18 +0530 Subject: [PATCH 01/28] mgr/volumes/nfs: Call orch nfs apply Signed-off-by: Varsha Rao --- src/pybind/mgr/volumes/fs/nfs.py | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/src/pybind/mgr/volumes/fs/nfs.py b/src/pybind/mgr/volumes/fs/nfs.py index 4a868caec3a..c83e5960b65 100644 --- a/src/pybind/mgr/volumes/fs/nfs.py +++ b/src/pybind/mgr/volumes/fs/nfs.py @@ -2,6 +2,8 @@ import json import errno import logging +from ceph.deployment.service_spec import NFSServiceSpec, PlacementSpec + import cephfs import orchestrator from .fs_util import create_pool @@ -337,6 +339,27 @@ class NFSCluster: "write configuration into rados object %s/%s/nfs-conf\n", self.pool_name, self.pool_ns) + def check_cluster_exists(self): + try: + completion = self.mgr.describe_service(service_type='nfs') + self.mgr._orchestrator_wait([completion]) + orchestrator.raise_if_exception(completion) + return self.cluster_id in [cluster.spec.service_id for cluster in completion.result] + except Exception as e: + log.exception(str(e)) + return True + + def _call_orch_apply_nfs(self, size): + spec = NFSServiceSpec(service_type='nfs', service_id=self.cluster_id, + pool=self.pool_name, namespace=self.pool_ns, + placement=PlacementSpec.from_string(str(size))) + try: + completion = self.mgr.apply_nfs(spec) + self.mgr._orchestrator_wait([completion]) + orchestrator.raise_if_exception(completion) + except Exception as e: + log.exception("Failed to create NFS daemons:{}".format(e)) + def create_nfs_cluster(self, export_type, size): if export_type != 'cephfs': return -errno.EINVAL,"", f"Invalid export type: {export_type}" @@ -357,8 +380,11 @@ class NFSCluster: return r, out, err self.create_empty_rados_obj() - #TODO Check if cluster exists - #TODO Call Orchestrator to deploy cluster + + if self.check_cluster_exists(): + log.info(f"{self.cluster_id} cluster already exists") + else: + self._call_orch_apply_nfs(size) return 0, "", "NFS Cluster Created Successfully" From 7f584fd574e471363597d9f366e8336deba72222 Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Tue, 21 Apr 2020 15:26:57 +0530 Subject: [PATCH 02/28] mgr/volumes/nfs: Change common ganesha config object name to 'conf-nfs.ganesha-' Signed-off-by: Varsha Rao --- src/pybind/mgr/volumes/fs/nfs.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/pybind/mgr/volumes/fs/nfs.py b/src/pybind/mgr/volumes/fs/nfs.py index c83e5960b65..d53e00617ee 100644 --- a/src/pybind/mgr/volumes/fs/nfs.py +++ b/src/pybind/mgr/volumes/fs/nfs.py @@ -256,8 +256,8 @@ class FSExport(object): "write configuration into rados object %s/%s/%s:\n%s", self.rados_pool, self.rados_namespace, obj, raw_config) - def _update_common_conf(self, ex_id): - common_conf = 'conf-nfs' + def _update_common_conf(self, cluster_id, ex_id): + common_conf = 'conf-nfs.ganesha-{}'.format(cluster_id) conf_blocks = { 'block_name': '%url', 'value': self.make_rados_url( @@ -269,7 +269,7 @@ class FSExport(object): self.exports[self.rados_namespace].append(export) conf_block = export.to_export_block() self._write_raw_config(conf_block, "export-{}".format(export.export_id)) - self._update_common_conf(export.export_id) + self._update_common_conf(export.cluster_id, export.export_id) def create_export(self, export_type, fs_name, pseudo_path, read_only, path, cluster_id): if export_type != 'cephfs': @@ -329,15 +329,15 @@ class NFSCluster: self.mgr 
= mgr
 
     def create_empty_rados_obj(self):
-        common_conf = 'conf-nfs'
+        common_conf = 'conf-nfs.{}'.format(self.cluster_id)
         result = ''
         with self.mgr.rados.open_ioctx(self.pool_name) as ioctx:
             if self.pool_ns:
                 ioctx.set_namespace(self.pool_ns)
             ioctx.write_full(common_conf, result.encode('utf-8'))
         log.debug(
-                "write configuration into rados object %s/%s/nfs-conf\n",
-                self.pool_name, self.pool_ns)
+                "write configuration into rados object %s/%s/%s\n",
+                self.pool_name, self.pool_ns, common_conf)
 
     def check_cluster_exists(self):
         try:

From b16190bb5fb61754199cf9731c496888299c983a Mon Sep 17 00:00:00 2001
From: Varsha Rao
Date: Tue, 21 Apr 2020 18:23:04 +0530
Subject: [PATCH 03/28] mgr/volumes: Add placement option to create nfs cluster interface

Signed-off-by: Varsha Rao
---
 doc/cephfs/fs-nfs-exports.rst    | 13 +++++--------
 src/pybind/mgr/volumes/fs/nfs.py |  8 ++++----
 src/pybind/mgr/volumes/module.py |  6 +++---
 3 files changed, 12 insertions(+), 15 deletions(-)

diff --git a/doc/cephfs/fs-nfs-exports.rst b/doc/cephfs/fs-nfs-exports.rst
index f18bbbae170..b076b88280c 100644
--- a/doc/cephfs/fs-nfs-exports.rst
+++ b/doc/cephfs/fs-nfs-exports.rst
@@ -17,18 +17,15 @@ Create NFS Ganesha Cluster
 
 .. code:: bash
 
-    $ ceph nfs cluster create <type> <clusterid> [--size=1]
+    $ ceph nfs cluster create <type> <clusterid> [<placement>]
 
 This creates a common recovery pool for all Ganesha daemons, new user based on
 cluster_id and common ganesha config rados object.
 
-Here size denotes the number of ganesha daemons within a cluster and type is
-export type. Currently only CephFS is supported.
-
-.. note:: This does not setup ganesha recovery database and start the daemons.
-          It needs to be done manually if not using vstart for creating
-          clusters. Please refer `ganesha-rados-grace doc
-          `_
+Here type is export type and placement specifies the size of cluster and hosts.
+For more details on placement specification refer the `orchestrator doc
+`_.
+Currently only CephFS export type is supported.
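
As a rough sketch of where the placement string ends up (names and calls mirror the
_call_orch_apply_nfs() hunk in this patch; the surrounding mgr module context is
assumed, and placement strings such as "1" or "2 hostA hostB" follow the orchestrator
placement syntax referenced above):

    from ceph.deployment.service_spec import NFSServiceSpec, PlacementSpec
    import orchestrator

    def apply_nfs_sketch(mgr, cluster_id, placement):
        # One spec per cluster; the placement string decides daemon count and hosts.
        spec = NFSServiceSpec(service_type='nfs',
                              service_id='ganesha-{}'.format(cluster_id),
                              pool='nfs-ganesha', namespace=cluster_id,
                              placement=PlacementSpec.from_string(placement))
        completion = mgr.apply_nfs(spec)           # hand the spec to the orchestrator
        mgr._orchestrator_wait([completion])       # block until it answers
        orchestrator.raise_if_exception(completion)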
Create CephFS Export ==================== diff --git a/src/pybind/mgr/volumes/fs/nfs.py b/src/pybind/mgr/volumes/fs/nfs.py index d53e00617ee..f7084b0d99b 100644 --- a/src/pybind/mgr/volumes/fs/nfs.py +++ b/src/pybind/mgr/volumes/fs/nfs.py @@ -349,10 +349,10 @@ class NFSCluster: log.exception(str(e)) return True - def _call_orch_apply_nfs(self, size): + def _call_orch_apply_nfs(self, placement): spec = NFSServiceSpec(service_type='nfs', service_id=self.cluster_id, pool=self.pool_name, namespace=self.pool_ns, - placement=PlacementSpec.from_string(str(size))) + placement=PlacementSpec.from_string(placement)) try: completion = self.mgr.apply_nfs(spec) self.mgr._orchestrator_wait([completion]) @@ -360,7 +360,7 @@ class NFSCluster: except Exception as e: log.exception("Failed to create NFS daemons:{}".format(e)) - def create_nfs_cluster(self, export_type, size): + def create_nfs_cluster(self, export_type, placement): if export_type != 'cephfs': return -errno.EINVAL,"", f"Invalid export type: {export_type}" @@ -384,7 +384,7 @@ class NFSCluster: if self.check_cluster_exists(): log.info(f"{self.cluster_id} cluster already exists") else: - self._call_orch_apply_nfs(size) + self._call_orch_apply_nfs(placement) return 0, "", "NFS Cluster Created Successfully" diff --git a/src/pybind/mgr/volumes/module.py b/src/pybind/mgr/volumes/module.py index 49c9c17cd2f..23095f85bd4 100644 --- a/src/pybind/mgr/volumes/module.py +++ b/src/pybind/mgr/volumes/module.py @@ -243,7 +243,7 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule): 'cmd': 'nfs cluster create ' 'name=type,type=CephString ' 'name=clusterid,type=CephString ' - 'name=size,type=CephInt,req=false ', + 'name=placement,type=CephString,req=false ', 'desc': "Create an NFS Cluster", 'perm': 'rw' }, @@ -427,6 +427,6 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule): return self.fs_export.delete_export(cmd['export_id']) def _cmd_nfs_cluster_create(self, inbuf, cmd): - #TODO add placement option nfs_cluster_obj = NFSCluster(self, cmd['clusterid']) - return nfs_cluster_obj.create_nfs_cluster(export_type=cmd['type'], size=cmd.get('size', 1)) + return nfs_cluster_obj.create_nfs_cluster(export_type=cmd['type'], + placement=cmd.get('placement', None)) From 16d11d1f67226dbede7c706e17418dfa278c7c73 Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Tue, 21 Apr 2020 18:50:04 +0530 Subject: [PATCH 04/28] vstart: Enable test_orchestrator in start_ganesha() Signed-off-by: Varsha Rao --- src/vstart.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/vstart.sh b/src/vstart.sh index 0acef0dbdbf..939219c4461 100755 --- a/src/vstart.sh +++ b/src/vstart.sh @@ -1092,6 +1092,9 @@ start_ganesha() { osd "allow rw pool=$pool_name namespace=$namespace, allow rw tag cephfs data=a" \ mds "allow rw path=/" \ >> "$keyring_fn" + + ceph_adm mgr module enable test_orchestrator + ceph_adm orch set backend test_orchestrator prun ceph_adm nfs cluster create cephfs $name echo "NFS_CORE_PARAM { @@ -1112,7 +1115,7 @@ start_ganesha() { Minor_Versions = 1, 2; } - %url rados://$pool_name/$namespace/conf-nfs + %url rados://$pool_name/$namespace/conf-nfs.$test_user RADOS_KV { pool = $pool_name; From 9f97401f2794d51cfd7c44564a82b747e88420f9 Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Thu, 23 Apr 2020 19:10:48 +0530 Subject: [PATCH 05/28] mgr/volumes: Add nfs cluster update interface $ ceph nfs cluster update This updates the existing deployed cluster according to placement value. 
Signed-off-by: Varsha Rao --- doc/cephfs/fs-nfs-exports.rst | 9 +++++++++ src/pybind/mgr/volumes/fs/nfs.py | 8 ++++++-- src/pybind/mgr/volumes/module.py | 10 ++++++++++ 3 files changed, 25 insertions(+), 2 deletions(-) diff --git a/doc/cephfs/fs-nfs-exports.rst b/doc/cephfs/fs-nfs-exports.rst index b076b88280c..e92935223b9 100644 --- a/doc/cephfs/fs-nfs-exports.rst +++ b/doc/cephfs/fs-nfs-exports.rst @@ -27,6 +27,15 @@ For more details on placement specification refer the `orchestrator doc `_. Currently only CephFS export type is supported. +Update NFS Ganesha Cluster +========================== + +.. code:: bash + + $ ceph nfs cluster update + +This updates the deployed cluster according to the placement value. + Create CephFS Export ==================== diff --git a/src/pybind/mgr/volumes/fs/nfs.py b/src/pybind/mgr/volumes/fs/nfs.py index f7084b0d99b..d136362f965 100644 --- a/src/pybind/mgr/volumes/fs/nfs.py +++ b/src/pybind/mgr/volumes/fs/nfs.py @@ -388,8 +388,12 @@ class NFSCluster: return 0, "", "NFS Cluster Created Successfully" - def update_nfs_cluster(self, size): - raise NotImplementedError() + def update_nfs_cluster(self, placement): + if not self.check_cluster_exists(): + return -errno.EINVAL, "", "Cluster does not exist" + + self._call_orch_apply_nfs(placement) + return 0, "", "NFS Cluster Updated Successfully" def delete_nfs_cluster(self): raise NotImplementedError() diff --git a/src/pybind/mgr/volumes/module.py b/src/pybind/mgr/volumes/module.py index 23095f85bd4..bae930dc910 100644 --- a/src/pybind/mgr/volumes/module.py +++ b/src/pybind/mgr/volumes/module.py @@ -247,6 +247,13 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule): 'desc': "Create an NFS Cluster", 'perm': 'rw' }, + { + 'cmd': 'nfs cluster update ' + 'name=clusterid,type=CephString ' + 'name=placement,type=CephString ', + 'desc': "Updates an NFS Cluster", + 'perm': 'rw' + }, # volume ls [recursive] # subvolume ls # volume authorize/deauthorize @@ -430,3 +437,6 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule): nfs_cluster_obj = NFSCluster(self, cmd['clusterid']) return nfs_cluster_obj.create_nfs_cluster(export_type=cmd['type'], placement=cmd.get('placement', None)) + def _cmd_nfs_cluster_update(self, inbuf, cmd): + nfs_cluster_obj = NFSCluster(self, cmd['clusterid']) + return nfs_cluster_obj.update_nfs_cluster(placement=cmd['placement']) From db2734a50fd3babd43892af0437edbe9a8130c99 Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Thu, 23 Apr 2020 21:16:16 +0530 Subject: [PATCH 06/28] mgr/volumes: Add nfs cluster delete interface $ ceph nfs cluster delete This deletes the deployed cluster. Signed-off-by: Varsha Rao --- doc/cephfs/fs-nfs-exports.rst | 9 +++++++++ src/pybind/mgr/volumes/fs/nfs.py | 13 ++++++++++++- src/pybind/mgr/volumes/module.py | 11 +++++++++++ 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/doc/cephfs/fs-nfs-exports.rst b/doc/cephfs/fs-nfs-exports.rst index e92935223b9..82353fb622a 100644 --- a/doc/cephfs/fs-nfs-exports.rst +++ b/doc/cephfs/fs-nfs-exports.rst @@ -36,6 +36,15 @@ Update NFS Ganesha Cluster This updates the deployed cluster according to the placement value. +Delete NFS Ganesha Cluster +========================== + +.. code:: bash + + $ ceph nfs cluster delete + +This deletes the deployed cluster. 
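
A minimal sketch of the teardown this maps to, following the delete_nfs_cluster()
hunk further down; error handling and return values are trimmed, and a mgr module
handle is assumed:

    import orchestrator

    def delete_cluster_sketch(mgr, cluster_id):
        service_id = 'ganesha-{}'.format(cluster_id)
        # Ask the orchestrator which nfs services are currently deployed
        completion = mgr.describe_service(service_type='nfs')
        mgr._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)
        deployed = [svc.spec.service_id for svc in completion.result]
        if service_id in deployed:
            # Remove the ganesha daemons; the rados configuration objects are
            # cleaned up by later patches in this series.
            completion = mgr.remove_service('nfs.' + service_id)
            mgr._orchestrator_wait([completion])
            orchestrator.raise_if_exception(completion)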
+ Create CephFS Export ==================== diff --git a/src/pybind/mgr/volumes/fs/nfs.py b/src/pybind/mgr/volumes/fs/nfs.py index d136362f965..12a802d2a96 100644 --- a/src/pybind/mgr/volumes/fs/nfs.py +++ b/src/pybind/mgr/volumes/fs/nfs.py @@ -396,4 +396,15 @@ class NFSCluster: return 0, "", "NFS Cluster Updated Successfully" def delete_nfs_cluster(self): - raise NotImplementedError() + if self.check_cluster_exists(): + try: + completion = self.mgr.remove_service('nfs.' + self.cluster_id) + self.mgr._orchestrator_wait([completion]) + orchestrator.raise_if_exception(completion) + except Exception as e: + log.exception("Failed to delete NFS Cluster") + return -errno.EINVAL, "", str(e) + else: + log.warn("Cluster does not exist") + + return 0, "", "NFS Cluster Deleted Successfully" diff --git a/src/pybind/mgr/volumes/module.py b/src/pybind/mgr/volumes/module.py index bae930dc910..ba9d11a9dd8 100644 --- a/src/pybind/mgr/volumes/module.py +++ b/src/pybind/mgr/volumes/module.py @@ -254,6 +254,12 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule): 'desc': "Updates an NFS Cluster", 'perm': 'rw' }, + { + 'cmd': 'nfs cluster delete ' + 'name=clusterid,type=CephString ', + 'desc': "Deletes an NFS Cluster", + 'perm': 'rw' + }, # volume ls [recursive] # subvolume ls # volume authorize/deauthorize @@ -437,6 +443,11 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule): nfs_cluster_obj = NFSCluster(self, cmd['clusterid']) return nfs_cluster_obj.create_nfs_cluster(export_type=cmd['type'], placement=cmd.get('placement', None)) + def _cmd_nfs_cluster_update(self, inbuf, cmd): nfs_cluster_obj = NFSCluster(self, cmd['clusterid']) return nfs_cluster_obj.update_nfs_cluster(placement=cmd['placement']) + + def _cmd_nfs_cluster_delete(self, inbuf, cmd): + nfs_cluster_obj = NFSCluster(self, cmd['clusterid']) + return nfs_cluster_obj.delete_nfs_cluster() From fff59e686aeae615935fe1ee8ad84352aa6757f3 Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Mon, 27 Apr 2020 17:00:03 +0530 Subject: [PATCH 07/28] mgr/volumes/nfs: Pass cluster_id directly to NFSCluster {create, update, delete} methods Signed-off-by: Varsha Rao --- src/pybind/mgr/volumes/fs/nfs.py | 24 +++++++++++++++++------- src/pybind/mgr/volumes/module.py | 12 +++++------- 2 files changed, 22 insertions(+), 14 deletions(-) diff --git a/src/pybind/mgr/volumes/fs/nfs.py b/src/pybind/mgr/volumes/fs/nfs.py index 12a802d2a96..b3319e15140 100644 --- a/src/pybind/mgr/volumes/fs/nfs.py +++ b/src/pybind/mgr/volumes/fs/nfs.py @@ -322,10 +322,9 @@ class FSExport(object): return "rados://{}/{}".format(self.rados_pool, obj) class NFSCluster: - def __init__(self, mgr, cluster_id): - self.cluster_id = "ganesha-%s" % cluster_id + def __init__(self, mgr): self.pool_name = 'nfs-ganesha' - self.pool_ns = cluster_id + self.pool_ns = '' self.mgr = mgr def create_empty_rados_obj(self): @@ -349,6 +348,12 @@ class NFSCluster: log.exception(str(e)) return True + def _set_cluster_id(self, cluster_id): + self.cluster_id = "ganesha-%s" % cluster_id + + def _set_pool_namespace(self, cluster_id): + self.pool_ns = cluster_id + def _call_orch_apply_nfs(self, placement): spec = NFSServiceSpec(service_type='nfs', service_id=self.cluster_id, pool=self.pool_name, namespace=self.pool_ns, @@ -360,12 +365,11 @@ class NFSCluster: except Exception as e: log.exception("Failed to create NFS daemons:{}".format(e)) - def create_nfs_cluster(self, export_type, placement): + def create_nfs_cluster(self, export_type, cluster_id, placement): if export_type != 'cephfs': return 
-errno.EINVAL,"", f"Invalid export type: {export_type}" pool_list = [p['pool_name'] for p in self.mgr.get_osdmap().dump().get('pools', [])] - client = 'client.%s' % self.cluster_id if self.pool_name not in pool_list: r, out, err = create_pool(self.mgr, self.pool_name) @@ -379,6 +383,8 @@ class NFSCluster: if r != 0: return r, out, err + self._set_pool_namespace(cluster_id) + self._set_cluster_id(cluster_id) self.create_empty_rados_obj() if self.check_cluster_exists(): @@ -388,14 +394,18 @@ class NFSCluster: return 0, "", "NFS Cluster Created Successfully" - def update_nfs_cluster(self, placement): + def update_nfs_cluster(self, cluster_id, placement): + self._set_pool_namespace(cluster_id) + self._set_cluster_id(cluster_id) + if not self.check_cluster_exists(): return -errno.EINVAL, "", "Cluster does not exist" self._call_orch_apply_nfs(placement) return 0, "", "NFS Cluster Updated Successfully" - def delete_nfs_cluster(self): + def delete_nfs_cluster(self, cluster_id): + self._set_cluster_id(cluster_id) if self.check_cluster_exists(): try: completion = self.mgr.remove_service('nfs.' + self.cluster_id) diff --git a/src/pybind/mgr/volumes/module.py b/src/pybind/mgr/volumes/module.py index ba9d11a9dd8..20a5c66588c 100644 --- a/src/pybind/mgr/volumes/module.py +++ b/src/pybind/mgr/volumes/module.py @@ -281,6 +281,7 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule): super(Module, self).__init__(*args, **kwargs) self.vc = VolumeClient(self) self.fs_export = FSExport(self) + self.nfs = NFSCluster(self) def __del__(self): self.vc.shutdown() @@ -440,14 +441,11 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule): return self.fs_export.delete_export(cmd['export_id']) def _cmd_nfs_cluster_create(self, inbuf, cmd): - nfs_cluster_obj = NFSCluster(self, cmd['clusterid']) - return nfs_cluster_obj.create_nfs_cluster(export_type=cmd['type'], - placement=cmd.get('placement', None)) + return self.nfs.create_nfs_cluster(cluster_id=cmd['clusterid'], export_type=cmd['type'], + placement=cmd.get('placement', None)) def _cmd_nfs_cluster_update(self, inbuf, cmd): - nfs_cluster_obj = NFSCluster(self, cmd['clusterid']) - return nfs_cluster_obj.update_nfs_cluster(placement=cmd['placement']) + return self.nfs.update_nfs_cluster(cluster_id=cmd['clusterid'], placement=cmd['placement']) def _cmd_nfs_cluster_delete(self, inbuf, cmd): - nfs_cluster_obj = NFSCluster(self, cmd['clusterid']) - return nfs_cluster_obj.delete_nfs_cluster() + return self.nfs.delete_nfs_cluster(cluster_id=cmd['clusterid']) From 85e71eb7d40e1bbd2f77dbc8dce65dd42ea85fb1 Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Tue, 28 Apr 2020 17:19:32 +0530 Subject: [PATCH 08/28] mgr/volumes/nfs: Add RADOS notify for common config object Signed-off-by: Varsha Rao --- src/pybind/mgr/volumes/fs/nfs.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/pybind/mgr/volumes/fs/nfs.py b/src/pybind/mgr/volumes/fs/nfs.py index b3319e15140..5dcf9a7673e 100644 --- a/src/pybind/mgr/volumes/fs/nfs.py +++ b/src/pybind/mgr/volumes/fs/nfs.py @@ -250,6 +250,7 @@ class FSExport(object): ioctx.set_namespace(self.rados_namespace) if append: ioctx.append(obj, raw_config.encode('utf-8')) + ioctx.notify(obj) else: ioctx.write_full(obj, raw_config.encode('utf-8')) log.debug( From 5b45610549ec7e3f22fbda77164201bcb7396b78 Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Tue, 28 Apr 2020 21:57:20 +0530 Subject: [PATCH 09/28] mgr/volumes/nfs: Add delete cephfs export command $ ceph nfs export delete binding: It is the pseudo root name clusterid: It is name 
of the cluster Signed-off-by: Varsha Rao --- doc/cephfs/fs-nfs-exports.rst | 9 ++++++++ src/pybind/mgr/volumes/fs/nfs.py | 37 ++++++++++++++++++++++++++------ src/pybind/mgr/volumes/module.py | 9 ++++---- 3 files changed, 45 insertions(+), 10 deletions(-) diff --git a/doc/cephfs/fs-nfs-exports.rst b/doc/cephfs/fs-nfs-exports.rst index 82353fb622a..067d21b1479 100644 --- a/doc/cephfs/fs-nfs-exports.rst +++ b/doc/cephfs/fs-nfs-exports.rst @@ -56,6 +56,15 @@ It creates export rados objects containing the export block. Here binding is the pseudo root name and type is export type. Currently only CephFS is supported. +Delete CephFS Export +==================== + +.. code:: bash + + $ ceph nfs export delete + +It deletes an export in cluster based on pseudo root name (binding). + Configuring NFS-Ganesha to export CephFS with vstart ==================================================== diff --git a/src/pybind/mgr/volumes/fs/nfs.py b/src/pybind/mgr/volumes/fs/nfs.py index 5dcf9a7673e..a4fc27f80b2 100644 --- a/src/pybind/mgr/volumes/fs/nfs.py +++ b/src/pybind/mgr/volumes/fs/nfs.py @@ -202,11 +202,10 @@ class FSExport(object): fs_map = self.mgr.get('fs_map') return fs_name in [fs['mdsmap']['fs_name'] for fs in fs_map['filesystems']] - def check_pseudo_path(self, pseudo_path): + def _fetch_export(self, pseudo_path): for ex in self.exports[self.rados_namespace]: if ex.pseudo == pseudo_path: - return True - return False + return ex def _create_user_key(self, entity): osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data=a'.format( @@ -257,6 +256,20 @@ class FSExport(object): "write configuration into rados object %s/%s/%s:\n%s", self.rados_pool, self.rados_namespace, obj, raw_config) + def _delete_export_url(self, obj, ex_id): + export_name = 'export-{}'.format(ex_id) + with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx: + if self.rados_namespace: + ioctx.set_namespace(self.rados_namespace) + + export_urls = ioctx.read(obj) + url = '%url "{}"\n\n'.format(self.make_rados_url(export_name)) + export_urls = export_urls.replace(url.encode('utf-8'), b'') + ioctx.remove_object(export_name) + ioctx.write_full(obj, export_urls) + ioctx.notify(obj) + log.debug("Export deleted: {}".format(url)) + def _update_common_conf(self, cluster_id, ex_id): common_conf = 'conf-nfs.ganesha-{}'.format(cluster_id) conf_blocks = { @@ -280,7 +293,7 @@ class FSExport(object): self.exports[cluster_id] = [] self.rados_namespace = cluster_id - if not self.check_fs(fs_name) or self.check_pseudo_path(pseudo_path): + if not self.check_fs(fs_name) or self._fetch_export(pseudo_path): return -errno.EINVAL,"", "Invalid CephFS name or export already exists" user_id, key = self._create_user_key(cluster_id) @@ -314,8 +327,20 @@ class FSExport(object): return (0, json.dumps(result, indent=4), '') - def delete_export(self, ex_id): - raise NotImplementedError() + def delete_export(self, pseudo_path, cluster_id): + try: + self.rados_namespace = cluster_id + export = self._fetch_export(pseudo_path) + if export: + common_conf = 'conf-nfs.ganesha-{}'.format(cluster_id) + self._delete_export_url(common_conf, export.export_id) + self.exports[cluster_id].remove(export) + else: + log.warn("Export does not exist") + except KeyError: + log.warn("Cluster does not exist") + + return 0, "", "Successfully deleted export" def make_rados_url(self, obj): if self.rados_namespace: diff --git a/src/pybind/mgr/volumes/module.py b/src/pybind/mgr/volumes/module.py index 20a5c66588c..fd23867b88c 100644 --- a/src/pybind/mgr/volumes/module.py +++ 
b/src/pybind/mgr/volumes/module.py @@ -234,8 +234,9 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule): 'perm': 'rw' }, { - 'cmd': 'fs nfs export delete ' - 'name=export_id,type=CephInt,req=true ', + 'cmd': 'nfs export delete ' + 'name=binding,type=CephString ' + 'name=attach,type=CephString ', 'desc': "Delete a cephfs export", 'perm': 'rw' }, @@ -437,8 +438,8 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule): pseudo_path=cmd['binding'], read_only=cmd.get('readonly', False), path=cmd.get('path', '/'), cluster_id=cmd.get('attach')) - def _cmd_fs_nfs_export_delete(self, inbuf, cmd): - return self.fs_export.delete_export(cmd['export_id']) + def _cmd_nfs_export_delete(self, inbuf, cmd): + return self.fs_export.delete_export(pseudo_path=cmd['binding'], cluster_id=cmd.get('attach')) def _cmd_nfs_cluster_create(self, inbuf, cmd): return self.nfs.create_nfs_cluster(cluster_id=cmd['clusterid'], export_type=cmd['type'], From f81fe362456865265f79b7b13b726fc0fdb6ef54 Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Mon, 4 May 2020 19:35:47 +0530 Subject: [PATCH 10/28] vstart: Update details about ganesha packages Signed-off-by: Varsha Rao --- src/vstart.sh | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/vstart.sh b/src/vstart.sh index 939219c4461..f28ae8c2c31 100755 --- a/src/vstart.sh +++ b/src/vstart.sh @@ -1064,11 +1064,12 @@ EOF } # Ganesha Daemons requires nfs-ganesha nfs-ganesha-ceph nfs-ganesha-rados-grace -# (version 2.7.6-2 and above) packages installed. On Fedora>=30 these packages -# can be installed directly with 'dnf'. For CentOS>=8 the packages need to be -# downloaded first from https://download.nfs-ganesha.org/2.7/2.7.6/CentOS/ and -# then install it. Similarly for Ubuntu 16.04 follow the instructions on -# https://launchpad.net/~nfs-ganesha/+archive/ubuntu/nfs-ganesha-2.7 +# nfs-ganesha-rados-urls (version 2.8.3 and above) packages installed. On +# Fedora>=31 these packages can be installed directly with 'dnf'. For CentOS>=8 +# the packages need to be downloaded first from +# https://download.nfs-ganesha.org/2.8/2.8.3/CentOS and then installed. 
+# Similarly for Ubuntu 16.04 follow the instructions on +# https://launchpad.net/~nfs-ganesha/+archive/ubuntu/nfs-ganesha-2.8 start_ganesha() { GANESHA_PORT=$(($CEPH_PORT + 4000)) From 31767428fc36f34331b75dd899ed0b9ae6b3ecaf Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Mon, 4 May 2020 19:43:12 +0530 Subject: [PATCH 11/28] vstart: Ensure cephadm and NFS does not conflict Signed-off-by: Varsha Rao --- src/vstart.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/vstart.sh b/src/vstart.sh index f28ae8c2c31..aac94848ab5 100755 --- a/src/vstart.sh +++ b/src/vstart.sh @@ -1367,7 +1367,7 @@ EOF fi # Ganesha Daemons -if [ $GANESHA_DAEMON_NUM -gt 0 ]; then +if [ $GANESHA_DAEMON_NUM -gt 0 ] && [ "$cephadm" -eq 0 ]; then start_ganesha fi From dd20761908a2a7ba63df941991aa5adfada4d1fb Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Tue, 5 May 2020 15:39:48 +0530 Subject: [PATCH 12/28] mgr/volumes: Create user for given path and fs Signed-off-by: Varsha Rao --- src/pybind/mgr/volumes/fs/nfs.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/src/pybind/mgr/volumes/fs/nfs.py b/src/pybind/mgr/volumes/fs/nfs.py index a4fc27f80b2..8db38fa6ace 100644 --- a/src/pybind/mgr/volumes/fs/nfs.py +++ b/src/pybind/mgr/volumes/fs/nfs.py @@ -207,13 +207,13 @@ class FSExport(object): if ex.pseudo == pseudo_path: return ex - def _create_user_key(self, entity): - osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data=a'.format( - self.rados_pool, self.rados_namespace) + def _create_user_key(self, entity, path, fs_name): + osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format( + self.rados_pool, self.rados_namespace, fs_name) ret, out, err = self.mgr.mon_command({ 'prefix': 'auth get-or-create', 'entity': 'client.{}'.format(entity), - 'caps' : ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow rw path=/'], + 'caps' : ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow rw path={}'.format(path)], 'format': 'json', }) @@ -296,9 +296,12 @@ class FSExport(object): if not self.check_fs(fs_name) or self._fetch_export(pseudo_path): return -errno.EINVAL,"", "Invalid CephFS name or export already exists" - user_id, key = self._create_user_key(cluster_id) - if isinstance(user_id, int): - return user_id, "", key + ex_id = self._gen_export_id() + user_id = f"{cluster_id}{ex_id}" + user_out, key = self._create_user_key(user_id, path, fs_name) + if isinstance(user_out, int): + return user_out, "", key + access_type = "RW" if read_only: access_type = "R" @@ -308,11 +311,10 @@ class FSExport(object): 'pseudo': self.format_path(pseudo_path), 'cluster_id': cluster_id, 'access_type': access_type, - 'fsal': {"name": "CEPH", "user_id":cluster_id, "fs_name": fs_name, "sec_label_xattr": ""}, + 'fsal': {"name": "CEPH", "user_id": user_id, "fs_name": fs_name, "sec_label_xattr": ""}, 'clients': [] } - ex_id = self._gen_export_id() export = Export.from_dict(ex_id, ex_dict) export.fsal.cephx_key = key self._save_export(export) From b9bff573c965bfa5c90919431e8791610fef7787 Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Tue, 5 May 2020 16:08:49 +0530 Subject: [PATCH 13/28] mgr/volumes/nfs: Delete user on removing export Signed-off-by: Varsha Rao --- src/pybind/mgr/volumes/fs/nfs.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/pybind/mgr/volumes/fs/nfs.py b/src/pybind/mgr/volumes/fs/nfs.py index 8db38fa6ace..4ccb97be3ec 100644 --- a/src/pybind/mgr/volumes/fs/nfs.py +++ b/src/pybind/mgr/volumes/fs/nfs.py @@ -225,6 +225,15 @@ class 
FSExport(object): return json_res[0]['entity'], json_res[0]['key'] + def _delete_user(self, entity): + ret, out, err = self.mgr.mon_command({ + 'prefix': 'auth del', + 'entity': 'client.{}'.format(entity), + }) + + if ret!= 0: + log.error(f"User could not be deleted: {err}") + def format_path(self, path): if path is not None: path = path.strip() @@ -337,6 +346,7 @@ class FSExport(object): common_conf = 'conf-nfs.ganesha-{}'.format(cluster_id) self._delete_export_url(common_conf, export.export_id) self.exports[cluster_id].remove(export) + self._delete_user(export.fsal.user_id) else: log.warn("Export does not exist") except KeyError: From e9e09a199f5d0624bda748d78eecba6cae4d7fc5 Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Wed, 6 May 2020 15:50:12 +0530 Subject: [PATCH 14/28] vstart: Add watch url for conf-nfs object Signed-off-by: Varsha Rao --- src/vstart.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/vstart.sh b/src/vstart.sh index aac94848ab5..2fb9a9cf1ec 100755 --- a/src/vstart.sh +++ b/src/vstart.sh @@ -1085,6 +1085,7 @@ start_ganesha() { test_user="ganesha-$name" pool_name="nfs-ganesha" namespace=$name + url="rados://$pool_name/$namespace/conf-nfs.$test_user" prun rm -rf $ganesha_dir prun mkdir -p $ganesha_dir @@ -1116,7 +1117,7 @@ start_ganesha() { Minor_Versions = 1, 2; } - %url rados://$pool_name/$namespace/conf-nfs.$test_user + %url $url RADOS_KV { pool = $pool_name; @@ -1127,6 +1128,7 @@ start_ganesha() { RADOS_URLS { Userid = $test_user; + watch_url = \"$url\"; }" > "$ganesha_dir/ganesha.conf" wconf < Date: Thu, 14 May 2020 16:46:48 +0530 Subject: [PATCH 15/28] mgr/volumes: Make nfs create export interface idempotent Signed-off-by: Varsha Rao --- src/pybind/mgr/volumes/fs/nfs.py | 48 ++++++++++++++++++-------------- 1 file changed, 27 insertions(+), 21 deletions(-) diff --git a/src/pybind/mgr/volumes/fs/nfs.py b/src/pybind/mgr/volumes/fs/nfs.py index 4ccb97be3ec..8b1fa8ac2af 100644 --- a/src/pybind/mgr/volumes/fs/nfs.py +++ b/src/pybind/mgr/volumes/fs/nfs.py @@ -232,7 +232,7 @@ class FSExport(object): }) if ret!= 0: - log.error(f"User could not be deleted: {err}") + log.warning(f"User could not be deleted: {err}") def format_path(self, path): if path is not None: @@ -297,36 +297,42 @@ class FSExport(object): def create_export(self, export_type, fs_name, pseudo_path, read_only, path, cluster_id): if export_type != 'cephfs': return -errno.EINVAL,"", f"Invalid export type: {export_type}" + + if not self.check_fs(fs_name): + return -errno.EINVAL,"", "Invalid CephFS name" + #TODO Check if valid cluster if cluster_id not in self.exports: self.exports[cluster_id] = [] self.rados_namespace = cluster_id - if not self.check_fs(fs_name) or self._fetch_export(pseudo_path): - return -errno.EINVAL,"", "Invalid CephFS name or export already exists" - - ex_id = self._gen_export_id() - user_id = f"{cluster_id}{ex_id}" - user_out, key = self._create_user_key(user_id, path, fs_name) - if isinstance(user_out, int): - return user_out, "", key - access_type = "RW" if read_only: access_type = "R" - ex_dict = { - 'path': self.format_path(path), - 'pseudo': self.format_path(pseudo_path), - 'cluster_id': cluster_id, - 'access_type': access_type, - 'fsal': {"name": "CEPH", "user_id": user_id, "fs_name": fs_name, "sec_label_xattr": ""}, - 'clients': [] - } + if not self._fetch_export(pseudo_path): + ex_id = self._gen_export_id() + user_id = f"{cluster_id}{ex_id}" + user_out, key = self._create_user_key(user_id, path, fs_name) - export = Export.from_dict(ex_id, ex_dict) - 
export.fsal.cephx_key = key - self._save_export(export) + if isinstance(user_out, int): + return user_out, "", key + + ex_dict = { + 'path': self.format_path(path), + 'pseudo': self.format_path(pseudo_path), + 'cluster_id': cluster_id, + 'access_type': access_type, + 'fsal': {"name": "CEPH", "user_id": user_id, + "fs_name": fs_name, "sec_label_xattr": ""}, + 'clients': [] + } + + export = Export.from_dict(ex_id, ex_dict) + export.fsal.cephx_key = key + self._save_export(export) + else: + log.error("Export already exists") result = { "bind": pseudo_path, From d12aed8eeaaa981873aa278d8bdb209e3f6968c9 Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Thu, 14 May 2020 19:16:35 +0530 Subject: [PATCH 16/28] mgr/volumes/nfs: Delete all exports on cluster deletion Signed-off-by: Varsha Rao --- src/pybind/mgr/volumes/fs/nfs.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/pybind/mgr/volumes/fs/nfs.py b/src/pybind/mgr/volumes/fs/nfs.py index 8b1fa8ac2af..e1bc490189a 100644 --- a/src/pybind/mgr/volumes/fs/nfs.py +++ b/src/pybind/mgr/volumes/fs/nfs.py @@ -344,10 +344,14 @@ class FSExport(object): return (0, json.dumps(result, indent=4), '') - def delete_export(self, pseudo_path, cluster_id): + def delete_export(self, pseudo_path, cluster_id, export_obj=None): try: self.rados_namespace = cluster_id - export = self._fetch_export(pseudo_path) + if export_obj: + export = export_obj + else: + export = self._fetch_export(pseudo_path) + if export: common_conf = 'conf-nfs.ganesha-{}'.format(cluster_id) self._delete_export_url(common_conf, export.export_id) @@ -360,6 +364,12 @@ class FSExport(object): return 0, "", "Successfully deleted export" + def delete_all_exports(self, cluster_id): + export_list = list(self.exports[cluster_id]) + for export in export_list: + self.delete_export(None, cluster_id, export) + log.info(f"All exports successfully deleted for cluster id: {cluster_id}") + def make_rados_url(self, obj): if self.rados_namespace: return "rados://{}/{}/{}".format(self.rados_pool, self.rados_namespace, obj) @@ -452,6 +462,7 @@ class NFSCluster: self._set_cluster_id(cluster_id) if self.check_cluster_exists(): try: + self.mgr.fs_export.delete_all_exports(cluster_id) completion = self.mgr.remove_service('nfs.' 
+ self.cluster_id) self.mgr._orchestrator_wait([completion]) orchestrator.raise_if_exception(completion) From 0ee8439b69e309c8c99cc0cb8466ce8dac866e6b Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Thu, 14 May 2020 21:15:58 +0530 Subject: [PATCH 17/28] mgr/volumes/nfs: Delete common config object on cluster deletion Signed-off-by: Varsha Rao --- src/pybind/mgr/volumes/fs/nfs.py | 54 ++++++++++++++++++++++---------- 1 file changed, 37 insertions(+), 17 deletions(-) diff --git a/src/pybind/mgr/volumes/fs/nfs.py b/src/pybind/mgr/volumes/fs/nfs.py index e1bc490189a..37ab1b8d004 100644 --- a/src/pybind/mgr/volumes/fs/nfs.py +++ b/src/pybind/mgr/volumes/fs/nfs.py @@ -365,10 +365,13 @@ class FSExport(object): return 0, "", "Successfully deleted export" def delete_all_exports(self, cluster_id): - export_list = list(self.exports[cluster_id]) - for export in export_list: - self.delete_export(None, cluster_id, export) - log.info(f"All exports successfully deleted for cluster id: {cluster_id}") + try: + export_list = list(self.exports[cluster_id]) + for export in export_list: + self.delete_export(None, cluster_id, export) + log.info(f"All exports successfully deleted for cluster id: {cluster_id}") + except KeyError: + log.info("No exports to delete") def make_rados_url(self, obj): if self.rados_namespace: @@ -382,7 +385,7 @@ class NFSCluster: self.mgr = mgr def create_empty_rados_obj(self): - common_conf = 'conf-nfs.{}'.format(self.cluster_id) + common_conf = self._get_common_conf_obj_name() result = '' with self.mgr.rados.open_ioctx(self.pool_name) as ioctx: if self.pool_ns: @@ -392,15 +395,23 @@ class NFSCluster: "write configuration into rados object %s/%s/%s\n", self.pool_name, self.pool_ns, common_conf) - def check_cluster_exists(self): + def delete_common_config_obj(self): + common_conf = self._get_common_conf_obj_name() + with self.mgr.rados.open_ioctx(self.pool_name) as ioctx: + if self.pool_ns: + ioctx.set_namespace(self.pool_ns) + + ioctx.remove_object(common_conf) + log.info(f"Deleted object:{common_conf}") + + def available_clusters(self): try: completion = self.mgr.describe_service(service_type='nfs') self.mgr._orchestrator_wait([completion]) orchestrator.raise_if_exception(completion) - return self.cluster_id in [cluster.spec.service_id for cluster in completion.result] + return [cluster.spec.service_id for cluster in completion.result] except Exception as e: log.exception(str(e)) - return True def _set_cluster_id(self, cluster_id): self.cluster_id = "ganesha-%s" % cluster_id @@ -408,6 +419,9 @@ class NFSCluster: def _set_pool_namespace(self, cluster_id): self.pool_ns = cluster_id + def _get_common_conf_obj_name(self): + return 'conf-nfs.{}'.format(self.cluster_id) + def _call_orch_apply_nfs(self, placement): spec = NFSServiceSpec(service_type='nfs', service_id=self.cluster_id, pool=self.pool_name, namespace=self.pool_ns, @@ -441,10 +455,11 @@ class NFSCluster: self._set_cluster_id(cluster_id) self.create_empty_rados_obj() - if self.check_cluster_exists(): - log.info(f"{self.cluster_id} cluster already exists") - else: + cluster_list = self.available_clusters() + if isinstance(cluster_list, list) and self.cluster_id not in cluster_list: self._call_orch_apply_nfs(placement) + else: + log.error(f"{self.cluster_id} cluster already exists") return 0, "", "NFS Cluster Created Successfully" @@ -452,24 +467,29 @@ class NFSCluster: self._set_pool_namespace(cluster_id) self._set_cluster_id(cluster_id) - if not self.check_cluster_exists(): - return -errno.EINVAL, "", "Cluster does not exist" + 
cluster_list = self.available_clusters() + if isinstance(cluster_list, list) and self.cluster_id in cluster_list: + self._call_orch_apply_nfs(placement) + return 0, "", "NFS Cluster Updated Successfully" - self._call_orch_apply_nfs(placement) - return 0, "", "NFS Cluster Updated Successfully" + return -errno.EINVAL, "", "Cluster does not exist" def delete_nfs_cluster(self, cluster_id): self._set_cluster_id(cluster_id) - if self.check_cluster_exists(): + cluster_list = self.available_clusters() + + if isinstance(cluster_list, list) and self.cluster_id in cluster_list: try: self.mgr.fs_export.delete_all_exports(cluster_id) completion = self.mgr.remove_service('nfs.' + self.cluster_id) self.mgr._orchestrator_wait([completion]) orchestrator.raise_if_exception(completion) + if len(cluster_list) == 1: + self.delete_common_config_obj() except Exception as e: log.exception("Failed to delete NFS Cluster") return -errno.EINVAL, "", str(e) else: - log.warn("Cluster does not exist") + log.error("Cluster does not exist") return 0, "", "NFS Cluster Deleted Successfully" From 8a84744a5a7b8e7fb158cfe33887e8f0a2b2fb91 Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Thu, 14 May 2020 21:30:46 +0530 Subject: [PATCH 18/28] mgr/volumes: Rearrange nfs export interface Signed-off-by: Varsha Rao --- doc/cephfs/fs-nfs-exports.rst | 4 ++-- src/pybind/mgr/volumes/fs/nfs.py | 6 +++--- src/pybind/mgr/volumes/module.py | 12 ++++++------ src/vstart.sh | 2 +- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/doc/cephfs/fs-nfs-exports.rst b/doc/cephfs/fs-nfs-exports.rst index 067d21b1479..2c8f5e203f0 100644 --- a/doc/cephfs/fs-nfs-exports.rst +++ b/doc/cephfs/fs-nfs-exports.rst @@ -50,7 +50,7 @@ Create CephFS Export .. code:: bash - $ ceph nfs export create [--readonly] [--path=/path/in/cephfs] + $ ceph nfs export create [--readonly] [--path=/path/in/cephfs] It creates export rados objects containing the export block. Here binding is the pseudo root name and type is export type. Currently only CephFS is @@ -61,7 +61,7 @@ Delete CephFS Export .. code:: bash - $ ceph nfs export delete + $ ceph nfs export delete It deletes an export in cluster based on pseudo root name (binding). 
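
For reference, a condensed sketch of the per-export cleanup that deletion performs,
pieced together from the _delete_export_url() hunk added earlier in this series; it
assumes an ioctx already opened on the nfs-ganesha pool with its namespace set to
the cluster id:

    def delete_export_sketch(ioctx, cluster_id, export_id, rados_pool='nfs-ganesha'):
        common_conf = 'conf-nfs.ganesha-{}'.format(cluster_id)
        export_obj = 'export-{}'.format(export_id)
        url = '%url "rados://{}/{}/{}"\n\n'.format(rados_pool, cluster_id, export_obj)
        # Drop the %url line pointing at this export from the common config object
        export_urls = ioctx.read(common_conf)
        ioctx.remove_object(export_obj)            # object holding the EXPORT block
        ioctx.write_full(common_conf, export_urls.replace(url.encode('utf-8'), b''))
        ioctx.notify(common_conf)                  # ganesha re-reads its config
        # The per-export cephx user is removed separately with an 'auth del' mon command.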
diff --git a/src/pybind/mgr/volumes/fs/nfs.py b/src/pybind/mgr/volumes/fs/nfs.py index 37ab1b8d004..b3f68ed60ca 100644 --- a/src/pybind/mgr/volumes/fs/nfs.py +++ b/src/pybind/mgr/volumes/fs/nfs.py @@ -294,7 +294,7 @@ class FSExport(object): self._write_raw_config(conf_block, "export-{}".format(export.export_id)) self._update_common_conf(export.cluster_id, export.export_id) - def create_export(self, export_type, fs_name, pseudo_path, read_only, path, cluster_id): + def create_export(self, export_type, fs_name, cluster_id, pseudo_path, read_only, path): if export_type != 'cephfs': return -errno.EINVAL,"", f"Invalid export type: {export_type}" @@ -344,7 +344,7 @@ class FSExport(object): return (0, json.dumps(result, indent=4), '') - def delete_export(self, pseudo_path, cluster_id, export_obj=None): + def delete_export(self, cluster_id, pseudo_path, export_obj=None): try: self.rados_namespace = cluster_id if export_obj: @@ -368,7 +368,7 @@ class FSExport(object): try: export_list = list(self.exports[cluster_id]) for export in export_list: - self.delete_export(None, cluster_id, export) + self.delete_export(cluster_id, None, export) log.info(f"All exports successfully deleted for cluster id: {cluster_id}") except KeyError: log.info("No exports to delete") diff --git a/src/pybind/mgr/volumes/module.py b/src/pybind/mgr/volumes/module.py index fd23867b88c..08f7b2613ce 100644 --- a/src/pybind/mgr/volumes/module.py +++ b/src/pybind/mgr/volumes/module.py @@ -226,8 +226,8 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule): 'cmd': 'nfs export create ' 'name=type,type=CephString ' 'name=fsname,type=CephString ' - 'name=binding,type=CephString ' 'name=attach,type=CephString ' + 'name=binding,type=CephString ' 'name=readonly,type=CephBool,req=false ' 'name=path,type=CephString,req=false ', 'desc': "Create a cephfs export", @@ -235,8 +235,8 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule): }, { 'cmd': 'nfs export delete ' - 'name=binding,type=CephString ' - 'name=attach,type=CephString ', + 'name=attach,type=CephString ' + 'name=binding,type=CephString ', 'desc': "Delete a cephfs export", 'perm': 'rw' }, @@ -435,11 +435,11 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule): def _cmd_nfs_export_create(self, inbuf, cmd): #TODO Extend export creation for rgw. 
return self.fs_export.create_export(export_type=cmd['type'], fs_name=cmd['fsname'], - pseudo_path=cmd['binding'], read_only=cmd.get('readonly', False), - path=cmd.get('path', '/'), cluster_id=cmd.get('attach')) + cluster_id=cmd.get('attach'), pseudo_path=cmd.get('binding'), + read_only=cmd.get('readonly', False), path=cmd.get('path', '/')) def _cmd_nfs_export_delete(self, inbuf, cmd): - return self.fs_export.delete_export(pseudo_path=cmd['binding'], cluster_id=cmd.get('attach')) + return self.fs_export.delete_export(cluster_id=cmd.get('attach'), pseudo_path=cmd.get('binding')) def _cmd_nfs_cluster_create(self, inbuf, cmd): return self.nfs.create_nfs_cluster(cluster_id=cmd['clusterid'], export_type=cmd['type'], diff --git a/src/vstart.sh b/src/vstart.sh index 2fb9a9cf1ec..169b2e326a6 100755 --- a/src/vstart.sh +++ b/src/vstart.sh @@ -1139,7 +1139,7 @@ start_ganesha() { pid file = $ganesha_dir/ganesha.pid EOF - prun ceph_adm nfs export create cephfs "a" "/cephfs" $name + prun ceph_adm nfs export create cephfs "a" $name "/cephfs" prun ganesha-rados-grace -p $pool_name -n $namespace add $name prun ganesha-rados-grace -p $pool_name -n $namespace From 2c50c9b0c168a6770560d70efd03769ddd43a8a8 Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Fri, 15 May 2020 21:34:03 +0530 Subject: [PATCH 19/28] vstart: Instead of CACHEINODE use MDCACHE CACHEINODE will be deprecated soon. Instead use MDCACHE. Signed-off-by: Varsha Rao --- src/vstart.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/vstart.sh b/src/vstart.sh index 169b2e326a6..b3bed4314c7 100755 --- a/src/vstart.sh +++ b/src/vstart.sh @@ -1106,7 +1106,7 @@ start_ganesha() { NFS_Port = $port; } - CACHEINODE { + MDCACHE { Dir_Chunk = 0; NParts = 1; Cache_Size = 1; From 9bcd7b673d7206a35abc9bf612ec3ff05f491ba7 Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Fri, 15 May 2020 23:26:13 +0530 Subject: [PATCH 20/28] mgr/volumes/nfs: Remove type option from export create interface Signed-off-by: Varsha Rao --- doc/cephfs/fs-nfs-exports.rst | 5 ++--- src/pybind/mgr/volumes/fs/nfs.py | 5 +---- src/pybind/mgr/volumes/module.py | 12 +++++------- 3 files changed, 8 insertions(+), 14 deletions(-) diff --git a/doc/cephfs/fs-nfs-exports.rst b/doc/cephfs/fs-nfs-exports.rst index 2c8f5e203f0..5776d77f166 100644 --- a/doc/cephfs/fs-nfs-exports.rst +++ b/doc/cephfs/fs-nfs-exports.rst @@ -50,11 +50,10 @@ Create CephFS Export .. code:: bash - $ ceph nfs export create [--readonly] [--path=/path/in/cephfs] + $ ceph nfs export create cephfs [--readonly] [--path=/path/in/cephfs] It creates export rados objects containing the export block. Here binding is -the pseudo root name and type is export type. Currently only CephFS is -supported. +the pseudo root name and type is export type. 
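
To make the effect concrete, this is roughly the export definition the module
assembles; the field names come from the create_export() hunks in this series,
while the values shown are only illustrative:

    ex_dict = {
        'path': '/',                     # exported path inside the CephFS filesystem
        'pseudo': '/cephfs',             # pseudo root the NFS client mounts
        'cluster_id': 'test',
        'access_type': 'RW',             # becomes 'RO' when --readonly is given
        'fsal': {'name': 'CEPH',
                 'user_id': 'test1',     # per-export cephx user: <cluster_id><export_id>
                 'fs_name': 'a',
                 'sec_label_xattr': ''},
        'clients': [],
    }
    # Export.from_dict(export_id, ex_dict) turns this into an EXPORT block, which is
    # written to the 'export-<id>' rados object and linked from
    # 'conf-nfs.ganesha-<cluster_id>' via a %url entry.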
Delete CephFS Export ==================== diff --git a/src/pybind/mgr/volumes/fs/nfs.py b/src/pybind/mgr/volumes/fs/nfs.py index b3f68ed60ca..0df2751895a 100644 --- a/src/pybind/mgr/volumes/fs/nfs.py +++ b/src/pybind/mgr/volumes/fs/nfs.py @@ -294,10 +294,7 @@ class FSExport(object): self._write_raw_config(conf_block, "export-{}".format(export.export_id)) self._update_common_conf(export.cluster_id, export.export_id) - def create_export(self, export_type, fs_name, cluster_id, pseudo_path, read_only, path): - if export_type != 'cephfs': - return -errno.EINVAL,"", f"Invalid export type: {export_type}" - + def create_export(self, fs_name, cluster_id, pseudo_path, read_only, path): if not self.check_fs(fs_name): return -errno.EINVAL,"", "Invalid CephFS name" diff --git a/src/pybind/mgr/volumes/module.py b/src/pybind/mgr/volumes/module.py index 08f7b2613ce..3ce6f81f8a9 100644 --- a/src/pybind/mgr/volumes/module.py +++ b/src/pybind/mgr/volumes/module.py @@ -223,8 +223,7 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule): 'perm': 'r' }, { - 'cmd': 'nfs export create ' - 'name=type,type=CephString ' + 'cmd': 'nfs export create cephfs ' 'name=fsname,type=CephString ' 'name=attach,type=CephString ' 'name=binding,type=CephString ' @@ -432,14 +431,13 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule): return self.vc.clone_cancel( vol_name=cmd['vol_name'], clone_name=cmd['clone_name'], group_name=cmd.get('group_name', None)) - def _cmd_nfs_export_create(self, inbuf, cmd): + def _cmd_nfs_export_create_cephfs(self, inbuf, cmd): #TODO Extend export creation for rgw. - return self.fs_export.create_export(export_type=cmd['type'], fs_name=cmd['fsname'], - cluster_id=cmd.get('attach'), pseudo_path=cmd.get('binding'), - read_only=cmd.get('readonly', False), path=cmd.get('path', '/')) + return self.fs_export.create_export(fs_name=cmd['fsname'], cluster_id=cmd['attach'], + pseudo_path=cmd['binding'], read_only=cmd.get('readonly', False), path=cmd.get('path', '/')) def _cmd_nfs_export_delete(self, inbuf, cmd): - return self.fs_export.delete_export(cluster_id=cmd.get('attach'), pseudo_path=cmd.get('binding')) + return self.fs_export.delete_export(cluster_id=cmd['attach'], pseudo_path=cmd['binding']) def _cmd_nfs_cluster_create(self, inbuf, cmd): return self.nfs.create_nfs_cluster(cluster_id=cmd['clusterid'], export_type=cmd['type'], From ab41951b04c99bf55e093937196add25d4face84 Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Thu, 30 Apr 2020 15:40:52 +0530 Subject: [PATCH 21/28] qa/cephfs: Add tests for nfs Signed-off-by: Varsha Rao --- .../cephadm/workunits/task/test_orch_cli.yaml | 17 ++++ qa/tasks/cephfs/test_nfs.py | 83 +++++++++++++++++++ 2 files changed, 100 insertions(+) create mode 100644 qa/suites/rados/cephadm/workunits/task/test_orch_cli.yaml create mode 100644 qa/tasks/cephfs/test_nfs.py diff --git a/qa/suites/rados/cephadm/workunits/task/test_orch_cli.yaml b/qa/suites/rados/cephadm/workunits/task/test_orch_cli.yaml new file mode 100644 index 00000000000..d57b3e47aa2 --- /dev/null +++ b/qa/suites/rados/cephadm/workunits/task/test_orch_cli.yaml @@ -0,0 +1,17 @@ +roles: +- - host.a + - osd.0 + - osd.1 + - osd.2 + - mon.a + - mgr.a + - client.0 +tasks: +- install: +- cephadm: +- cephadm.shell: + host.a: + - ceph orch apply mds 1 +- cephfs_test_runner: + modules: + - tasks.cephfs.test_nfs diff --git a/qa/tasks/cephfs/test_nfs.py b/qa/tasks/cephfs/test_nfs.py new file mode 100644 index 00000000000..fdf184bb6f2 --- /dev/null +++ b/qa/tasks/cephfs/test_nfs.py @@ -0,0 +1,83 @@ +import 
os +import json +import time +import errno +import logging +from io import BytesIO + +from tasks.mgr.mgr_test_case import MgrTestCase +from teuthology.exceptions import CommandFailedError + +log = logging.getLogger(__name__) + + +class TestNFS(MgrTestCase): + def _nfs_cmd(self, *args): + return self.mgr_cluster.mon_manager.raw_cluster_cmd("nfs", *args) + + def _orch_cmd(self, *args): + return self.mgr_cluster.mon_manager.raw_cluster_cmd("orch", *args) + + def _sys_cmd(self, cmd): + cmd[0:0] = ['sudo'] + ret = self.ctx.cluster.run(args=cmd, check_status=False, stdout=BytesIO(), stderr=BytesIO()) + stdout = ret[0].stdout + if stdout: + # It's RemoteProcess defined in teuthology/orchestra/run.py + return stdout.getvalue() + + def setUp(self): + super(TestNFS, self).setUp() + self._load_module("cephadm") + self._orch_cmd("set", "backend", "cephadm") + + self.cluster_id = "test" + self.export_type = "cephfs" + self.pseudo_path = "/cephfs" + self.expected_name = 'nfs.ganesha-test' + + def _check_port_status(self): + log.info("NETSTAT") + self._sys_cmd(['netstat', '-tnlp']) + + def _check_nfs_server_status(self): + res = self._sys_cmd(['systemctl', 'status', 'nfs-server']) + if isinstance(res, bytes) and b'Active: active' in res: + self._disable_nfs() + + def _disable_nfs(self): + log.info("Disabling NFS") + self._sys_cmd(['systemctl', 'disable', 'nfs-server', '--now']) + + def _check_nfs_status(self): + return self._orch_cmd('ls', 'nfs') + + def _check_idempotency(self, *args): + for _ in range(2): + self._nfs_cmd(*args) + + def test_create_cluster(self): + self._check_nfs_server_status() + self._nfs_cmd("cluster", "create", self.export_type, self.cluster_id) + time.sleep(8) + orch_output = self._check_nfs_status() + expected_status = '1/1' + try: + if self.expected_name not in orch_output or expected_status not in orch_output: + raise CommandFailedError("NFS Ganesha cluster could not be deployed") + except (TypeError, CommandFailedError): + raise + + def test_create_cluster_idempotent(self): + self._check_nfs_server_status() + self._check_idempotency("cluster", "create", self.export_type, self.cluster_id) + + def test_delete_cluster(self): + self.test_create_cluster() + self._nfs_cmd("cluster", "delete", self.cluster_id) + time.sleep(8) + orch_output = self._check_nfs_status() + self.assertEqual("No services reported\n", orch_output) + + def test_delete_cluster_idempotent(self): + self._check_idempotency("cluster", "delete", self.cluster_id) From fee3f25609130c8f03a829b0ad28a6aa0760dd35 Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Tue, 26 May 2020 15:43:40 +0530 Subject: [PATCH 22/28] mgr/fs/nfs: Use check_mon_command() instead of mon_command() check_mon_command() checks the return code of mon command. 
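
The two idioms side by side, mirroring the _delete_user() hunk below; this is an
illustration only, and the import path for MonCommandFailed is an assumption about
the mgr module API:

    from mgr_module import MonCommandFailed   # assumed import path for the exception

    def delete_user_old_style(mgr, log, entity):
        # mon_command(): the caller must inspect the return code itself
        ret, out, err = mgr.mon_command({'prefix': 'auth del',
                                         'entity': 'client.{}'.format(entity)})
        if ret != 0:
            log.warning("User could not be deleted: {}".format(err))

    def delete_user_new_style(mgr, log, entity):
        # check_mon_command(): same call, but a non-zero return raises MonCommandFailed
        try:
            mgr.check_mon_command({'prefix': 'auth del',
                                   'entity': 'client.{}'.format(entity)})
        except MonCommandFailed as e:
            log.warning("User could not be deleted: {}".format(e))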
Signed-off-by: Varsha Rao --- src/pybind/mgr/volumes/fs/nfs.py | 33 ++++++++++++-------------------- 1 file changed, 12 insertions(+), 21 deletions(-) diff --git a/src/pybind/mgr/volumes/fs/nfs.py b/src/pybind/mgr/volumes/fs/nfs.py index 0df2751895a..cb34bd9e8f0 100644 --- a/src/pybind/mgr/volumes/fs/nfs.py +++ b/src/pybind/mgr/volumes/fs/nfs.py @@ -210,29 +210,26 @@ class FSExport(object): def _create_user_key(self, entity, path, fs_name): osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format( self.rados_pool, self.rados_namespace, fs_name) - ret, out, err = self.mgr.mon_command({ + + ret, out, err = self.mgr.check_mon_command({ 'prefix': 'auth get-or-create', 'entity': 'client.{}'.format(entity), 'caps' : ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow rw path={}'.format(path)], 'format': 'json', }) - if ret!= 0: - return ret, err - json_res = json.loads(out) - log.info("Export user is {}".format(json_res[0]['entity'])) - + log.info("Export user created is {}".format(json_res[0]['entity'])) return json_res[0]['entity'], json_res[0]['key'] def _delete_user(self, entity): - ret, out, err = self.mgr.mon_command({ - 'prefix': 'auth del', - 'entity': 'client.{}'.format(entity), - }) - - if ret!= 0: - log.warning(f"User could not be deleted: {err}") + try: + self.mgr.check_mon_command({ + 'prefix': 'auth del', + 'entity': 'client.{}'.format(entity), + }) + except MonCommandFailed as e: + log.warning(f"User could not be deleted: {e}") def format_path(self, path): if path is not None: @@ -312,9 +309,6 @@ class FSExport(object): user_id = f"{cluster_id}{ex_id}" user_out, key = self._create_user_key(user_id, path, fs_name) - if isinstance(user_out, int): - return user_out, "", key - ex_dict = { 'path': self.format_path(path), 'pseudo': self.format_path(pseudo_path), @@ -442,11 +436,8 @@ class NFSCluster: return r, out, err log.info("{}".format(out)) - command = {'prefix': 'osd pool application enable', 'pool': self.pool_name, 'app': 'nfs'} - r, out, err = self.mgr.mon_command(command) - - if r != 0: - return r, out, err + self.mgr.check_mon_command({'prefix': 'osd pool application enable', + 'pool': self.pool_name, 'app': 'nfs'}) self._set_pool_namespace(cluster_id) self._set_cluster_id(cluster_id) From 923d1814866736067b76236c3cadaccbad0e871d Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Tue, 26 May 2020 15:47:55 +0530 Subject: [PATCH 23/28] mgr/volumes/nfs: Fix incorrect read only access_type value Signed-off-by: Varsha Rao --- src/pybind/mgr/volumes/fs/nfs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pybind/mgr/volumes/fs/nfs.py b/src/pybind/mgr/volumes/fs/nfs.py index cb34bd9e8f0..72607961a23 100644 --- a/src/pybind/mgr/volumes/fs/nfs.py +++ b/src/pybind/mgr/volumes/fs/nfs.py @@ -302,7 +302,7 @@ class FSExport(object): self.rados_namespace = cluster_id access_type = "RW" if read_only: - access_type = "R" + access_type = "RO" if not self._fetch_export(pseudo_path): ex_id = self._gen_export_id() From b8ce61e8bde7a0cb1eed7129038701c10fd275cd Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Thu, 28 May 2020 15:44:56 +0530 Subject: [PATCH 24/28] mgr/volumes/nfs: Fix idempotency of cluster and export commands Signed-off-by: Varsha Rao --- src/pybind/mgr/volumes/fs/nfs.py | 203 +++++++++++++++---------------- 1 file changed, 98 insertions(+), 105 deletions(-) diff --git a/src/pybind/mgr/volumes/fs/nfs.py b/src/pybind/mgr/volumes/fs/nfs.py index 72607961a23..95a65cc53eb 100644 --- a/src/pybind/mgr/volumes/fs/nfs.py +++ 
b/src/pybind/mgr/volumes/fs/nfs.py @@ -223,13 +223,11 @@ class FSExport(object): return json_res[0]['entity'], json_res[0]['key'] def _delete_user(self, entity): - try: - self.mgr.check_mon_command({ - 'prefix': 'auth del', - 'entity': 'client.{}'.format(entity), - }) - except MonCommandFailed as e: - log.warning(f"User could not be deleted: {e}") + self.mgr.check_mon_command({ + 'prefix': 'auth del', + 'entity': 'client.{}'.format(entity), + }) + log.info(f"Export user deleted is {entity}") def format_path(self, path): if path is not None: @@ -292,48 +290,47 @@ class FSExport(object): self._update_common_conf(export.cluster_id, export.export_id) def create_export(self, fs_name, cluster_id, pseudo_path, read_only, path): - if not self.check_fs(fs_name): - return -errno.EINVAL,"", "Invalid CephFS name" + try: + if not self.check_fs(fs_name): + return -errno.EINVAL,"", "Invalid CephFS name" - #TODO Check if valid cluster - if cluster_id not in self.exports: - self.exports[cluster_id] = [] + #TODO Check if valid cluster + if cluster_id not in self.exports: + self.exports[cluster_id] = [] - self.rados_namespace = cluster_id - access_type = "RW" - if read_only: - access_type = "RO" + self.rados_namespace = cluster_id - if not self._fetch_export(pseudo_path): - ex_id = self._gen_export_id() - user_id = f"{cluster_id}{ex_id}" - user_out, key = self._create_user_key(user_id, path, fs_name) - - ex_dict = { - 'path': self.format_path(path), - 'pseudo': self.format_path(pseudo_path), - 'cluster_id': cluster_id, - 'access_type': access_type, - 'fsal': {"name": "CEPH", "user_id": user_id, - "fs_name": fs_name, "sec_label_xattr": ""}, - 'clients': [] - } - - export = Export.from_dict(ex_id, ex_dict) - export.fsal.cephx_key = key - self._save_export(export) - else: - log.error("Export already exists") - - result = { - "bind": pseudo_path, - "fs": fs_name, - "path": path, - "cluster": cluster_id, - "mode": access_type, - } - - return (0, json.dumps(result, indent=4), '') + if not self._fetch_export(pseudo_path): + ex_id = self._gen_export_id() + user_id = f"{cluster_id}{ex_id}" + user_out, key = self._create_user_key(user_id, path, fs_name) + access_type = "RW" + if read_only: + access_type = "RO" + ex_dict = { + 'path': self.format_path(path), + 'pseudo': self.format_path(pseudo_path), + 'cluster_id': cluster_id, + 'access_type': access_type, + 'fsal': {"name": "CEPH", "user_id": user_id, + "fs_name": fs_name, "sec_label_xattr": ""}, + 'clients': [] + } + export = Export.from_dict(ex_id, ex_dict) + export.fsal.cephx_key = key + self._save_export(export) + result = { + "bind": pseudo_path, + "fs": fs_name, + "path": path, + "cluster": cluster_id, + "mode": access_type, + } + return (0, json.dumps(result, indent=4), '') + return 0, "", "Export already exists" + except Exception as e: + log.warning("Failed to create exports") + return -errno.EINVAL, "", str(e) def delete_export(self, cluster_id, pseudo_path, export_obj=None): try: @@ -348,18 +345,21 @@ class FSExport(object): self._delete_export_url(common_conf, export.export_id) self.exports[cluster_id].remove(export) self._delete_user(export.fsal.user_id) - else: - log.warn("Export does not exist") + return 0, "Successfully deleted export", "" + return 0, "", "Export does not exist" except KeyError: - log.warn("Cluster does not exist") - - return 0, "", "Successfully deleted export" + return -errno.EINVAL, "", "Cluster does not exist" + except Exception as e: + log.warning("Failed to delete exports") + return -errno.EINVAL, "", str(e) def 
delete_all_exports(self, cluster_id):
         try:
             export_list = list(self.exports[cluster_id])
             for export in export_list:
-                self.delete_export(cluster_id, None, export)
+                ret, out, err = self.delete_export(cluster_id, None, export)
+                if ret != 0:
+                    raise Exception(f"Failed to delete exports: {err} and {ret}")
             log.info(f"All exports successfully deleted for cluster id: {cluster_id}")
         except KeyError:
             log.info("No exports to delete")
@@ -396,13 +396,10 @@ class NFSCluster:
         log.info(f"Deleted object:{common_conf}")

     def available_clusters(self):
-        try:
-            completion = self.mgr.describe_service(service_type='nfs')
-            self.mgr._orchestrator_wait([completion])
-            orchestrator.raise_if_exception(completion)
-            return [cluster.spec.service_id for cluster in completion.result]
-        except Exception as e:
-            log.exception(str(e))
+        completion = self.mgr.describe_service(service_type='nfs')
+        self.mgr._orchestrator_wait([completion])
+        orchestrator.raise_if_exception(completion)
+        return [cluster.spec.service_id for cluster in completion.result]

     def _set_cluster_id(self, cluster_id):
         self.cluster_id = "ganesha-%s" % cluster_id
@@ -417,67 +414,63 @@ class NFSCluster:
         spec = NFSServiceSpec(service_type='nfs', service_id=self.cluster_id,
                               pool=self.pool_name, namespace=self.pool_ns,
                               placement=PlacementSpec.from_string(placement))
-        try:
-            completion = self.mgr.apply_nfs(spec)
-            self.mgr._orchestrator_wait([completion])
-            orchestrator.raise_if_exception(completion)
-        except Exception as e:
-            log.exception("Failed to create NFS daemons:{}".format(e))
+        completion = self.mgr.apply_nfs(spec)
+        self.mgr._orchestrator_wait([completion])
+        orchestrator.raise_if_exception(completion)

     def create_nfs_cluster(self, export_type, cluster_id, placement):
         if export_type != 'cephfs':
-            return -errno.EINVAL,"", f"Invalid export type: {export_type}"
+            return -errno.EINVAL, "", f"Invalid export type: {export_type}"
+        try:
+            pool_list = [p['pool_name'] for p in self.mgr.get_osdmap().dump().get('pools', [])]
-        pool_list = [p['pool_name'] for p in self.mgr.get_osdmap().dump().get('pools', [])]
+            if self.pool_name not in pool_list:
+                r, out, err = create_pool(self.mgr, self.pool_name)
+                if r != 0:
+                    return r, out, err
+                log.info(f"Pool Status: {out}")
-        if self.pool_name not in pool_list:
-            r, out, err = create_pool(self.mgr, self.pool_name)
-            if r != 0:
-                return r, out, err
-            log.info("{}".format(out))
+            self.mgr.check_mon_command({'prefix': 'osd pool application enable',
+                                        'pool': self.pool_name, 'app': 'nfs'})
-        self.mgr.check_mon_command({'prefix': 'osd pool application enable',
-                                    'pool': self.pool_name, 'app': 'nfs'})
+            self._set_pool_namespace(cluster_id)
+            self._set_cluster_id(cluster_id)
+            self.create_empty_rados_obj()
-        self._set_pool_namespace(cluster_id)
-        self._set_cluster_id(cluster_id)
-        self.create_empty_rados_obj()
-
-        cluster_list = self.available_clusters()
-        if isinstance(cluster_list, list) and self.cluster_id not in cluster_list:
-            self._call_orch_apply_nfs(placement)
-        else:
-            log.error(f"{self.cluster_id} cluster already exists")
-
-        return 0, "", "NFS Cluster Created Successfully"
+            if self.cluster_id not in self.available_clusters():
+                self._call_orch_apply_nfs(placement)
+                return 0, "NFS Cluster Created Successfully", ""
+            return 0, "", f"{self.cluster_id} cluster already exists"
+        except Exception as e:
+            log.warning("NFS Cluster could not be created")
+            return -errno.EINVAL, "", str(e)

     def update_nfs_cluster(self, cluster_id, placement):
-        self._set_pool_namespace(cluster_id)
-        self._set_cluster_id(cluster_id)
-
cluster_list = self.available_clusters() - if isinstance(cluster_list, list) and self.cluster_id in cluster_list: - self._call_orch_apply_nfs(placement) - return 0, "", "NFS Cluster Updated Successfully" - - return -errno.EINVAL, "", "Cluster does not exist" + try: + self._set_pool_namespace(cluster_id) + self._set_cluster_id(cluster_id) + if self.cluster_id in self.available_clusters(): + self._call_orch_apply_nfs(placement) + return 0, "NFS Cluster Updated Successfully", "" + return -errno.EINVAL, "", "Cluster does not exist" + except Exception as e: + log.warning("NFS Cluster could not be updated") + return -errno.EINVAL, "", str(e) def delete_nfs_cluster(self, cluster_id): - self._set_cluster_id(cluster_id) - cluster_list = self.available_clusters() + try: + self._set_cluster_id(cluster_id) + cluster_list = self.available_clusters() - if isinstance(cluster_list, list) and self.cluster_id in cluster_list: - try: + if self.cluster_id in self.available_clusters(): self.mgr.fs_export.delete_all_exports(cluster_id) completion = self.mgr.remove_service('nfs.' + self.cluster_id) self.mgr._orchestrator_wait([completion]) orchestrator.raise_if_exception(completion) if len(cluster_list) == 1: self.delete_common_config_obj() - except Exception as e: - log.exception("Failed to delete NFS Cluster") - return -errno.EINVAL, "", str(e) - else: - log.error("Cluster does not exist") - - return 0, "", "NFS Cluster Deleted Successfully" + return 0, "NFS Cluster Deleted Successfully", "" + return 0, "", "Cluster does not exist" + except Exception as e: + log.warning("Failed to delete NFS Cluster") + return -errno.EINVAL, "", str(e) From af3b925dfd97929e2b967ab6b05e92b7d0de6ece Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Thu, 28 May 2020 10:57:25 +0000 Subject: [PATCH 25/28] qa/tasks/cephfs: Add tests for nfs exports Signed-off-by: Varsha Rao --- qa/tasks/cephfs/test_nfs.py | 86 ++++++++++++++++++++++++++----------- 1 file changed, 62 insertions(+), 24 deletions(-) diff --git a/qa/tasks/cephfs/test_nfs.py b/qa/tasks/cephfs/test_nfs.py index fdf184bb6f2..9368e95875f 100644 --- a/qa/tasks/cephfs/test_nfs.py +++ b/qa/tasks/cephfs/test_nfs.py @@ -12,18 +12,20 @@ log = logging.getLogger(__name__) class TestNFS(MgrTestCase): + def _cmd(self, *args): + return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args) + def _nfs_cmd(self, *args): - return self.mgr_cluster.mon_manager.raw_cluster_cmd("nfs", *args) + return self._cmd("nfs", *args) def _orch_cmd(self, *args): - return self.mgr_cluster.mon_manager.raw_cluster_cmd("orch", *args) + return self._cmd("orch", *args) def _sys_cmd(self, cmd): cmd[0:0] = ['sudo'] ret = self.ctx.cluster.run(args=cmd, check_status=False, stdout=BytesIO(), stderr=BytesIO()) stdout = ret[0].stdout if stdout: - # It's RemoteProcess defined in teuthology/orchestra/run.py return stdout.getvalue() def setUp(self): @@ -34,7 +36,9 @@ class TestNFS(MgrTestCase): self.cluster_id = "test" self.export_type = "cephfs" self.pseudo_path = "/cephfs" - self.expected_name = 'nfs.ganesha-test' + self.path = "/" + self.fs_name = "nfs-cephfs" + self.expected_name = "nfs.ganesha-test" def _check_port_status(self): log.info("NETSTAT") @@ -52,32 +56,66 @@ class TestNFS(MgrTestCase): def _check_nfs_status(self): return self._orch_cmd('ls', 'nfs') - def _check_idempotency(self, *args): - for _ in range(2): - self._nfs_cmd(*args) - - def test_create_cluster(self): + def _test_create_cluster(self): self._check_nfs_server_status() - self._nfs_cmd("cluster", "create", self.export_type, self.cluster_id) + 
self._nfs_cmd('cluster', 'create', self.export_type, self.cluster_id) time.sleep(8) orch_output = self._check_nfs_status() expected_status = '1/1' - try: - if self.expected_name not in orch_output or expected_status not in orch_output: - raise CommandFailedError("NFS Ganesha cluster could not be deployed") - except (TypeError, CommandFailedError): - raise + if self.expected_name not in orch_output or expected_status not in orch_output: + raise RuntimeError("NFS Ganesha cluster could not be deployed") - def test_create_cluster_idempotent(self): - self._check_nfs_server_status() - self._check_idempotency("cluster", "create", self.export_type, self.cluster_id) - - def test_delete_cluster(self): - self.test_create_cluster() - self._nfs_cmd("cluster", "delete", self.cluster_id) + def _test_delete_cluster(self): + self._nfs_cmd('cluster', 'delete', self.cluster_id) time.sleep(8) orch_output = self._check_nfs_status() self.assertEqual("No services reported\n", orch_output) - def test_delete_cluster_idempotent(self): - self._check_idempotency("cluster", "delete", self.cluster_id) + def _create_export(self, export_id, create_fs=False, extra_cmd=None): + if create_fs: + self._cmd('fs', 'volume', 'create', self.fs_name) + export_cmd = ['nfs', 'export', 'create', 'cephfs', self.fs_name, self.cluster_id] + if isinstance(extra_cmd, list): + export_cmd.extend(extra_cmd) + else: + export_cmd.append(self.pseudo_path) + + self._cmd(*export_cmd) + res = self._sys_cmd(['rados', '-p', 'nfs-ganesha', '-N', self.cluster_id, 'get', f'export-{export_id}', '-']) + if res == b'': + raise RuntimeError("Export cannot be created") + + def _create_default_export(self): + self._test_create_cluster() + self._create_export(export_id='1', create_fs=True) + + def _delete_export(self): + self._nfs_cmd('export', 'delete', self.cluster_id, self.pseudo_path) + + def _check_export_obj_deleted(self, conf_obj=False): + rados_obj_ls = self._sys_cmd(['rados', '-p', 'nfs-ganesha', '-N', self.cluster_id, 'ls']) + + if b'export-' in rados_obj_ls or (conf_obj and b'conf-nfs' in rados_obj_ls): + raise RuntimeError("Delete export failed") + + def test_create_and_delete_cluster(self): + self._test_create_cluster() + self._test_delete_cluster() + + def test_export_create_and_delete(self): + self._create_default_export() + self._delete_export() + self._check_export_obj_deleted() + self._test_delete_cluster() + + def _test_create_multiple_exports(self): + #Export-1 with default values + self._create_default_export() + #Export-2 with r only + self._create_export(export_id='2', extra_cmd=[self.pseudo_path, '--readonly']) + #Export-3 for subvolume with r only + self._cmd('fs', 'subvolume', 'create', self.fs_name, 'sub_vol') + fs_path = self._cmd('fs', 'subvolume', 'getpath', self.fs_name, 'sub_vol') + self._create_export(export_id='3', extra_cmd=[self.pseudo_path, '--readonly', fs_path.strip()]) + self._test_delete_cluster() + self._check_export_obj_deleted(conf_obj=True) From 1477c987647f9bde96cddfea84ee797b71471bab Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Fri, 29 May 2020 15:24:03 +0530 Subject: [PATCH 26/28] qa/tasks/cephfs: Don't enable cephadm in TestNFS Signed-off-by: Varsha Rao --- qa/tasks/cephfs/test_nfs.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/qa/tasks/cephfs/test_nfs.py b/qa/tasks/cephfs/test_nfs.py index 9368e95875f..32c33c3e220 100644 --- a/qa/tasks/cephfs/test_nfs.py +++ b/qa/tasks/cephfs/test_nfs.py @@ -30,9 +30,6 @@ class TestNFS(MgrTestCase): def setUp(self): super(TestNFS, self).setUp() - 
self._load_module("cephadm") - self._orch_cmd("set", "backend", "cephadm") - self.cluster_id = "test" self.export_type = "cephfs" self.pseudo_path = "/cephfs" From 87c517af310a2fb0a5226867b6a852393c0f6b44 Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Fri, 29 May 2020 23:24:54 +0530 Subject: [PATCH 27/28] mgr/nfs: Instead of 'auth del' use 'auth rm' `mgr` profile allows 'auth rm'. Use it instead of 'auth del' which is not allowed. Signed-off-by: Varsha Rao --- src/pybind/mgr/volumes/fs/nfs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pybind/mgr/volumes/fs/nfs.py b/src/pybind/mgr/volumes/fs/nfs.py index 95a65cc53eb..5bdabcf8976 100644 --- a/src/pybind/mgr/volumes/fs/nfs.py +++ b/src/pybind/mgr/volumes/fs/nfs.py @@ -224,7 +224,7 @@ class FSExport(object): def _delete_user(self, entity): self.mgr.check_mon_command({ - 'prefix': 'auth del', + 'prefix': 'auth rm', 'entity': 'client.{}'.format(entity), }) log.info(f"Export user deleted is {entity}") From b2adff10b55ddf11088c4176a6e03ee150e55b8a Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Fri, 29 May 2020 23:36:34 +0530 Subject: [PATCH 28/28] qa/tasks/cephfs: Enable multiple exports tests Signed-off-by: Varsha Rao --- qa/tasks/cephfs/test_nfs.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/qa/tasks/cephfs/test_nfs.py b/qa/tasks/cephfs/test_nfs.py index 32c33c3e220..850b0b736ef 100644 --- a/qa/tasks/cephfs/test_nfs.py +++ b/qa/tasks/cephfs/test_nfs.py @@ -105,14 +105,16 @@ class TestNFS(MgrTestCase): self._check_export_obj_deleted() self._test_delete_cluster() - def _test_create_multiple_exports(self): + def test_create_multiple_exports(self): #Export-1 with default values self._create_default_export() #Export-2 with r only - self._create_export(export_id='2', extra_cmd=[self.pseudo_path, '--readonly']) + self._create_export(export_id='2', extra_cmd=[self.pseudo_path+'1', '--readonly']) #Export-3 for subvolume with r only self._cmd('fs', 'subvolume', 'create', self.fs_name, 'sub_vol') fs_path = self._cmd('fs', 'subvolume', 'getpath', self.fs_name, 'sub_vol') - self._create_export(export_id='3', extra_cmd=[self.pseudo_path, '--readonly', fs_path.strip()]) + self._create_export(export_id='3', extra_cmd=[self.pseudo_path+'2', '--readonly', fs_path.strip()]) + #Export-4 for subvolume + self._create_export(export_id='4', extra_cmd=[self.pseudo_path+'3', fs_path.strip()]) self._test_delete_cluster() self._check_export_obj_deleted(conf_obj=True)
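
A note on the export user provisioning in the patches above: _create_user_key asks the monitors for a per-export cephx user via 'auth get-or-create', with read-only mon caps, osd caps scoped to the ganesha pool/namespace plus the CephFS data tag, and mds caps scoped to the exported path. Below is a minimal, self-contained sketch (not part of the module) of that command payload as handed to check_mon_command; the entity, pool, namespace, and path values mirror the QA defaults and are illustrative only.

import json

def build_export_auth_caps(entity, path, fs_name, rados_pool, rados_namespace):
    # Mirrors the caps assembled by _create_user_key for 'auth get-or-create'.
    osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
        rados_pool, rados_namespace, fs_name)
    return {
        'prefix': 'auth get-or-create',
        'entity': 'client.{}'.format(entity),
        'caps': ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow rw path={}'.format(path)],
        'format': 'json',
    }

if __name__ == '__main__':
    # "test1" follows the user_id pattern f"{cluster_id}{ex_id}"; pool and namespace
    # match the QA fixtures (pool "nfs-ganesha", namespace = cluster id "test").
    print(json.dumps(build_export_auth_caps('test1', '/', 'nfs-cephfs',
                                            'nfs-ganesha', 'test'), indent=4))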
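
The reworked cluster and export entry points all return a (ret, out, err) triple and treat "already exists" / "does not exist" as a successful no-op, so re-running a command cannot fail spuriously; genuine errors are mapped to -errno.EINVAL with the exception text in err. A self-contained sketch of that convention (a plain set stands in for the export store; this is not the mgr module itself):

import errno
import json

def create_export_sketch(existing_pseudo_paths, fs_name, cluster_id, pseudo_path,
                         path='/', read_only=False):
    # Idempotent create: a repeat call with the same pseudo path is a no-op success.
    try:
        if pseudo_path in existing_pseudo_paths:
            return 0, "", "Export already exists"
        existing_pseudo_paths.add(pseudo_path)
        result = {
            "bind": pseudo_path,
            "fs": fs_name,
            "path": path,
            "cluster": cluster_id,
            "mode": "RO" if read_only else "RW",
        }
        return 0, json.dumps(result, indent=4), ""
    except Exception as e:
        return -errno.EINVAL, "", str(e)

if __name__ == '__main__':
    store = set()
    print(create_export_sketch(store, 'nfs-cephfs', 'test', '/cephfs'))
    # Second invocation returns ret == 0 without touching the store again.
    print(create_export_sketch(store, 'nfs-cephfs', 'test', '/cephfs'))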
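
The QA helpers build the `ceph nfs export create cephfs ...` argument list from the fs name, cluster id, pseudo path, an optional --readonly flag, and an optional subvolume path. A small sketch of that composition; the fixture names come from the tests above, and the subvolume path is a stand-in for whatever 'fs subvolume getpath' actually returns:

def export_create_args(fs_name, cluster_id, pseudo_path, readonly=False, path=None):
    # Mirrors _create_export(): base command plus the optional extra arguments.
    args = ['nfs', 'export', 'create', 'cephfs', fs_name, cluster_id, pseudo_path]
    if readonly:
        args.append('--readonly')
    if path is not None:
        args.append(path)
    return args

if __name__ == '__main__':
    # Export-1: defaults; Export-2: read-only; Export-3: read-only subvolume export.
    print(export_create_args('nfs-cephfs', 'test', '/cephfs'))
    print(export_create_args('nfs-cephfs', 'test', '/cephfs1', readonly=True))
    print(export_create_args('nfs-cephfs', 'test', '/cephfs2', readonly=True,
                             path='/volumes/_nogroup/sub_vol'))  # placeholder subvolume path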