mgr/dashboard: consume mgr/nfs via mgr.remote()

Stop using the dashboard version of the Ganesha config classes; consume
mgr/nfs instead via remote().
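
For context, remote() is the ceph-mgr inter-module call interface; a minimal
sketch of the new consumption pattern, using the module methods added below
(the cluster id 'mycluster' and export id 1 are hypothetical):

    from .. import mgr  # the dashboard's mgr singleton

    # Instead of parsing Ganesha config objects itself, the dashboard now
    # delegates to the mgr/nfs module over remote().
    clusters = mgr.remote('nfs', 'cluster_ls')                # known clusters
    exports = mgr.remote('nfs', 'export_ls')                  # all exports, as dicts
    export = mgr.remote('nfs', 'export_get', 'mycluster', 1)  # one export by id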

mgr/nfs/export: return Export from _apply_export

Future callers will want this.
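
A sketch of what a caller gains, assuming the four-tuple signature introduced
in this commit (cluster_id and new_export_dict are placeholders):

    # _apply_export now returns the resulting Export alongside (retval, out, err),
    # so wrappers such as export_apply() can hand the export back as a dict.
    ret, out, err, export = self._apply_export(cluster_id, new_export_dict)
    if not ret:
        return export.to_dict()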

mgr/nfs: new module methods for dashboard consumption

Add some new methods that are easy for the dashboard API to consume.  These
are very similar to the CLI methods but do not have the @CLICommand and
related decorators, and have slightly different interfaces (e.g., returning
the created/modified Export dict).
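
To illustrate the split, a sketch of the two entry points side by side (the
CLI handler here is only representative of the existing decorated commands,
not part of this diff):

    # CLI path: decorated; returns (retval, stdout, stderr) for the ceph shell.
    @CLICommand('nfs export ls', perm='r')
    def _cmd_nfs_export_ls(self, cluster_id: str) -> Tuple[int, str, str]:
        return self.export_mgr.list_exports(cluster_id=cluster_id)

    # Dashboard path: plain method; returns Python objects directly.
    def export_ls(self) -> List[Dict[Any, Any]]:
        return self.export_mgr.list_all_exports()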

mgr/dashboard: remove old ganesha code (and tests)

Fixes: https://tracker.ceph.com/issues/46493
Signed-off-by: Sage Weil <sage@newdream.net>
Author: Sage Weil <sage@newdream.net>, 2021-07-28 10:29:47 -04:00; committed by Alfonso Martínez
parent 5db11d3988
commit 6e5a1eefd0
6 changed files with 123 additions and 2143 deletions

src/pybind/mgr/dashboard/controllers/nfsganesha.py

@@ -7,17 +7,21 @@ from functools import partial
import cephfs
import cherrypy
from .. import mgr
from ..security import Scope
from ..services.cephfs import CephFS
from ..services.cephx import CephX
from ..services.exception import DashboardException, serialize_dashboard_exception
from ..services.ganesha import Ganesha, GaneshaConf, NFSException
from ..services.rgw_client import NoCredentialsException, \
NoRgwDaemonsException, RequestException, RgwClient
from . import APIDoc, APIRouter, BaseController, Endpoint, EndpointDoc, \
ReadPermission, RESTController, Task, UIRouter
logger = logging.getLogger('controllers.ganesha')
logger = logging.getLogger('controllers.nfs')
class NFSException(DashboardException):
def __init__(self, msg):
super(NFSException, self).__init__(component="nfs", msg=msg)
# documentation helpers
@@ -100,8 +104,8 @@ class NFSGanesha(RESTController):
def status(self):
status = {'available': True, 'message': None}
try:
Ganesha.get_ganesha_clusters()
except NFSException as e:
mgr.remote('nfs', 'is_active')
except (NameError, ImportError) as e:
status['message'] = str(e) # type: ignore
status['available'] = False
@@ -116,12 +120,7 @@ class NFSGaneshaExports(RESTController):
@EndpointDoc("List all NFS-Ganesha exports",
responses={200: [EXPORT_SCHEMA]})
def list(self):
result = []
for cluster_id in Ganesha.get_ganesha_clusters():
result.extend(
[export.to_dict()
for export in GaneshaConf.instance(cluster_id).list_exports()])
return result
return mgr.remote('nfs', 'export_ls')
@NfsTask('create', {'path': '{path}', 'fsal': '{fsal.name}',
'cluster_id': '{cluster_id}'}, 2.0)
@@ -131,18 +130,18 @@ class NFSGaneshaExports(RESTController):
def create(self, path, cluster_id, daemons, pseudo, tag, access_type,
squash, security_label, protocols, transports, fsal, clients,
reload_daemons=True):
if fsal['name'] not in Ganesha.fsals_available():
if fsal['name'] not in mgr.remote('nfs', 'cluster_fsals'):
raise NFSException("Cannot create this export. "
"FSAL '{}' cannot be managed by the dashboard."
.format(fsal['name']))
ganesha_conf = GaneshaConf.instance(cluster_id)
ex_id = ganesha_conf.create_export({
fsal.pop('user_id') # mgr/nfs does not let you customize user_id
# FIXME: what was this? 'tag': tag,
raw_ex = {
'path': path,
'pseudo': pseudo,
'cluster_id': cluster_id,
'daemons': daemons,
'tag': tag,
'access_type': access_type,
'squash': squash,
'security_label': security_label,
@@ -150,10 +149,9 @@ class NFSGaneshaExports(RESTController):
'transports': transports,
'fsal': fsal,
'clients': clients
})
if reload_daemons:
ganesha_conf.reload_daemons(daemons)
return ganesha_conf.get_export(ex_id).to_dict()
}
export = mgr.remote('nfs', 'export_apply', cluster_id, raw_ex)
return export
@EndpointDoc("Get an NFS-Ganesha export",
parameters={
@@ -162,11 +160,7 @@ class NFSGaneshaExports(RESTController):
},
responses={200: EXPORT_SCHEMA})
def get(self, cluster_id, export_id):
export_id = int(export_id)
ganesha_conf = GaneshaConf.instance(cluster_id)
if not ganesha_conf.has_export(export_id):
raise cherrypy.HTTPError(404)
return ganesha_conf.get_export(export_id).to_dict()
return mgr.remote('nfs', 'export_get', cluster_id, export_id)
@NfsTask('edit', {'cluster_id': '{cluster_id}', 'export_id': '{export_id}'},
2.0)
@@ -178,23 +172,22 @@ class NFSGaneshaExports(RESTController):
squash, security_label, protocols, transports, fsal, clients,
reload_daemons=True):
export_id = int(export_id)
ganesha_conf = GaneshaConf.instance(cluster_id)
if not ganesha_conf.has_export(export_id):
if not mgr.remote('nfs', 'export_get', cluster_id, export_id):
raise cherrypy.HTTPError(404) # pragma: no cover - the handling is too obvious
if fsal['name'] not in Ganesha.fsals_available():
if fsal['name'] not in mgr.remote('nfs', 'cluster_fsals'):
raise NFSException("Cannot make modifications to this export. "
"FSAL '{}' cannot be managed by the dashboard."
.format(fsal['name']))
old_export = ganesha_conf.update_export({
'export_id': export_id,
fsal.pop('user_id') # mgr/nfs does not let you customize user_id
# FIXME: what was this? 'tag': tag,
raw_ex = {
'path': path,
'pseudo': pseudo,
'cluster_id': cluster_id,
'daemons': daemons,
'pseudo': pseudo,
'tag': tag,
'access_type': access_type,
'squash': squash,
'security_label': security_label,
@@ -202,14 +195,9 @@ class NFSGaneshaExports(RESTController):
'transports': transports,
'fsal': fsal,
'clients': clients
})
daemons = list(daemons)
for d_id in old_export.daemons:
if d_id not in daemons:
daemons.append(d_id)
if reload_daemons:
ganesha_conf.reload_daemons(daemons)
return ganesha_conf.get_export(export_id).to_dict()
}
export = mgr.remote('nfs', 'export_apply', cluster_id, raw_ex)
return export
@NfsTask('delete', {'cluster_id': '{cluster_id}',
'export_id': '{export_id}'}, 2.0)
@@ -224,13 +212,11 @@ class NFSGaneshaExports(RESTController):
})
def delete(self, cluster_id, export_id, reload_daemons=True):
export_id = int(export_id)
ganesha_conf = GaneshaConf.instance(cluster_id)
if not ganesha_conf.has_export(export_id):
export = mgr.remote('nfs', 'export_get', cluster_id, export_id)
if not export:
raise cherrypy.HTTPError(404) # pragma: no cover - the handling is too obvious
export = ganesha_conf.remove_export(export_id)
if reload_daemons:
ganesha_conf.reload_daemons(export.daemons)
mgr.remote('nfs', 'export_rm', cluster_id, export['pseudo'])
@APIRouter('/nfs-ganesha/daemon', Scope.NFS_GANESHA)
@@ -241,15 +227,13 @@ class NFSGaneshaService(RESTController):
responses={200: [{
'daemon_id': (str, 'Daemon identifier'),
'cluster_id': (str, 'Cluster identifier'),
'cluster_type': (str, 'Cluster type'),
'cluster_type': (str, 'Cluster type'), # FIXME: remove this property
'status': (int, 'Status of daemon', True),
'desc': (str, 'Status description', True)
}]})
def list(self):
result = []
for cluster_id in Ganesha.get_ganesha_clusters():
result.extend(GaneshaConf.instance(cluster_id).list_daemons())
return result
# FIXME: remove this; dashboard should only care about clusters.
return mgr.remote('nfs', 'daemon_ls')
@UIRouter('/nfs-ganesha', Scope.NFS_GANESHA)
@@ -257,12 +241,13 @@ class NFSGaneshaUi(BaseController):
@Endpoint('GET', '/cephx/clients')
@ReadPermission
def cephx_clients(self):
return list(CephX.list_clients())
# FIXME: remove this; cephx users/creds are managed by mgr/nfs
return ['admin']
@Endpoint('GET', '/fsals')
@ReadPermission
def fsals(self):
return Ganesha.fsals_available()
return mgr.remote('nfs', 'cluster_fsals')
@Endpoint('GET', '/lsdir')
@ReadPermission
@@ -316,4 +301,4 @@ class NFSGaneshaUi(BaseController):
@Endpoint('GET', '/clusters')
@ReadPermission
def clusters(self):
return Ganesha.get_ganesha_clusters()
return mgr.remote('nfs', 'cluster_ls')

src/pybind/mgr/dashboard/services/ganesha.py (file diff suppressed because it is too large)

src/pybind/mgr/dashboard/tests/test_ganesha.py

@@ -1,965 +1,11 @@
# -*- coding: utf-8 -*-
# pylint: disable=too-many-lines
import unittest
from unittest.mock import MagicMock, Mock, patch
from unittest.mock import patch
from urllib.parse import urlencode
from ceph.deployment.service_spec import NFSServiceSpec
from orchestrator import DaemonDescription, ServiceDescription
from .. import mgr
from ..controllers.nfsganesha import NFSGaneshaUi
from ..services import ganesha
from ..services.ganesha import ClusterType, Export, GaneshaConf, GaneshaConfParser, NFSException
from ..settings import Settings
from . import ControllerTestCase # pylint: disable=no-name-in-module
from . import KVStoreMockMixin # pylint: disable=no-name-in-module
class GaneshaConfTest(unittest.TestCase, KVStoreMockMixin):
daemon_raw_config = """
NFS_CORE_PARAM {
Enable_NLM = false;
Enable_RQUOTA = false;
Protocols = 4;
NFS_Port = 14000;
}
MDCACHE {
Dir_Chunk = 0;
}
NFSv4 {
RecoveryBackend = rados_cluster;
Minor_Versions = 1, 2;
}
RADOS_KV {
pool = nfs-ganesha;
namespace = vstart;
UserId = vstart;
nodeid = a;
}
RADOS_URLS {
Userid = vstart;
watch_url = 'rados://nfs-ganesha/vstart/conf-nfs.vstart';
}
%url rados://nfs-ganesha/vstart/conf-nfs.vstart
"""
export_1 = """
EXPORT {
Export_ID=1;
Protocols = 4;
Path = /;
Pseudo = /cephfs_a/;
Access_Type = RW;
Protocols = 4;
Attr_Expiration_Time = 0;
# Delegations = R;
# Squash = root;
FSAL {
Name = CEPH;
Filesystem = "a";
User_Id = "ganesha";
# Secret_Access_Key = "YOUR SECRET KEY HERE";
}
CLIENT
{
Clients = 192.168.0.10, 192.168.1.0/8;
Squash = None;
}
CLIENT
{
Clients = 192.168.0.0/16;
Squash = All;
Access_Type = RO;
}
}
"""
export_2 = """
EXPORT
{
Export_ID=2;
Path = "/";
Pseudo = "/rgw";
Access_Type = RW;
squash = AllAnonymous;
Protocols = 4, 3;
Transports = TCP, UDP;
FSAL {
Name = RGW;
User_Id = "testuser";
Access_Key_Id ="access_key";
Secret_Access_Key = "secret_key";
}
}
"""
conf_nodea = '''
%url "rados://nfs-ganesha/bar/export-2"
%url "rados://nfs-ganesha/bar/export-1"'''
conf_nodeb = '%url "rados://nfs-ganesha/bar/export-1"'
conf_nfs_foo = '''
%url "rados://nfs-ganesha/foo/export-1"
%url "rados://nfs-ganesha/foo/export-2"'''
class RObject(object):
def __init__(self, key, raw):
self.key = key
self.raw = raw
def read(self, _):
return self.raw.encode('utf-8')
def stat(self):
return len(self.raw), None
def _ioctx_write_full_mock(self, key, content):
if key not in self.temp_store[self.temp_store_namespace]:
self.temp_store[self.temp_store_namespace][key] = \
GaneshaConfTest.RObject(key, content.decode('utf-8'))
else:
self.temp_store[self.temp_store_namespace][key].raw = content.decode('utf-8')
def _ioctx_remove_mock(self, key):
del self.temp_store[self.temp_store_namespace][key]
def _ioctx_list_objects_mock(self):
return [obj for _, obj in self.temp_store[self.temp_store_namespace].items()]
def _ioctl_stat_mock(self, key):
return self.temp_store[self.temp_store_namespace][key].stat()
def _ioctl_read_mock(self, key, size):
return self.temp_store[self.temp_store_namespace][key].read(size)
def _ioctx_set_namespace_mock(self, namespace):
self.temp_store_namespace = namespace
@staticmethod
def _set_user_defined_clusters_location(clusters_pool_namespace='nfs-ganesha/bar'):
Settings.GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE = clusters_pool_namespace
def setUp(self):
self.mock_kv_store()
self.clusters = {
'foo': {
'pool': 'nfs-ganesha',
'namespace': 'foo',
'type': ClusterType.ORCHESTRATOR,
'daemon_conf': 'conf-nfs.foo',
'daemons': ['foo.host_a', 'foo.host_b'],
'exports': {
1: ['foo.host_a', 'foo.host_b'],
2: ['foo.host_a', 'foo.host_b'],
3: ['foo.host_a', 'foo.host_b'] # for new-added export
}
}
}
# Unset user-defined location.
self._set_user_defined_clusters_location('')
self.temp_store_namespace = None
self._reset_temp_store()
self.io_mock = MagicMock()
self.io_mock.set_namespace.side_effect = self._ioctx_set_namespace_mock
self.io_mock.read = self._ioctl_read_mock
self.io_mock.stat = self._ioctl_stat_mock
self.io_mock.list_objects.side_effect = self._ioctx_list_objects_mock
self.io_mock.write_full.side_effect = self._ioctx_write_full_mock
self.io_mock.remove_object.side_effect = self._ioctx_remove_mock
ioctx_mock = MagicMock()
ioctx_mock.__enter__ = Mock(return_value=(self.io_mock))
ioctx_mock.__exit__ = Mock(return_value=None)
mgr.rados = MagicMock()
mgr.rados.open_ioctx.return_value = ioctx_mock
self._mock_orchestrator(True)
ganesha.CephX = MagicMock()
ganesha.CephX.list_clients.return_value = ['ganesha']
ganesha.CephX.get_client_key.return_value = 'ganesha'
ganesha.CephFS = MagicMock()
def _reset_temp_store(self):
self.temp_store_namespace = None
self.temp_store = {
'bar': {
'export-1': GaneshaConfTest.RObject("export-1", self.export_1),
'export-2': GaneshaConfTest.RObject("export-2", self.export_2),
'conf-nodea': GaneshaConfTest.RObject("conf-nodea", self.conf_nodea),
'conf-nodeb': GaneshaConfTest.RObject("conf-nodeb", self.conf_nodeb),
},
'foo': {
'export-1': GaneshaConfTest.RObject("export-1", self.export_1),
'export-2': GaneshaConfTest.RObject("export-2", self.export_2),
'conf-nfs.foo': GaneshaConfTest.RObject("conf-nfs.foo", self.conf_nfs_foo)
}
}
def _mock_orchestrator(self, enable):
# mock nfs services
orch_nfs_services = [
ServiceDescription(spec=NFSServiceSpec(service_id='foo'))
] if enable else []
# pylint: disable=protected-access
ganesha.Ganesha._get_orch_nfs_services = Mock(return_value=orch_nfs_services)
# mock nfs daemons
def _get_nfs_instances(service_name=None):
if not enable:
return []
instances = {
'nfs.foo': [
DaemonDescription(daemon_id='foo.host_a', status=1),
DaemonDescription(daemon_id='foo.host_b', status=1)
],
'nfs.bar': [
DaemonDescription(daemon_id='bar.host_c', status=1)
]
}
if service_name is not None:
return instances[service_name]
result = []
for _, daemons in instances.items():
result.extend(daemons)
return result
ganesha.GaneshaConfOrchestrator._get_orch_nfs_instances = Mock(
side_effect=_get_nfs_instances)
def test_parse_daemon_raw_config(self):
expected_daemon_config = [
{
"block_name": "NFS_CORE_PARAM",
"enable_nlm": False,
"enable_rquota": False,
"protocols": 4,
"nfs_port": 14000
},
{
"block_name": "MDCACHE",
"dir_chunk": 0
},
{
"block_name": "NFSV4",
"recoverybackend": "rados_cluster",
"minor_versions": [1, 2]
},
{
"block_name": "RADOS_KV",
"pool": "nfs-ganesha",
"namespace": "vstart",
"userid": "vstart",
"nodeid": "a"
},
{
"block_name": "RADOS_URLS",
"userid": "vstart",
"watch_url": "'rados://nfs-ganesha/vstart/conf-nfs.vstart'"
},
{
"block_name": "%url",
"value": "rados://nfs-ganesha/vstart/conf-nfs.vstart"
}
]
daemon_config = GaneshaConfParser(self.daemon_raw_config).parse()
self.assertEqual(daemon_config, expected_daemon_config)
def test_export_parser_1(self):
blocks = GaneshaConfParser(self.export_1).parse()
self.assertIsInstance(blocks, list)
self.assertEqual(len(blocks), 1)
export = Export.from_export_block(blocks[0], '_default_',
GaneshaConf.ganesha_defaults({}))
self.assertEqual(export.export_id, 1)
self.assertEqual(export.path, "/")
self.assertEqual(export.pseudo, "/cephfs_a")
self.assertIsNone(export.tag)
self.assertEqual(export.access_type, "RW")
self.assertEqual(export.squash, "root_squash")
self.assertEqual(export.protocols, {4})
self.assertEqual(export.transports, {"TCP", "UDP"})
self.assertEqual(export.fsal.name, "CEPH")
self.assertEqual(export.fsal.user_id, "ganesha")
self.assertEqual(export.fsal.fs_name, "a")
self.assertEqual(export.fsal.sec_label_xattr, None)
self.assertEqual(len(export.clients), 2)
self.assertEqual(export.clients[0].addresses,
["192.168.0.10", "192.168.1.0/8"])
self.assertEqual(export.clients[0].squash, "no_root_squash")
self.assertIsNone(export.clients[0].access_type)
self.assertEqual(export.clients[1].addresses, ["192.168.0.0/16"])
self.assertEqual(export.clients[1].squash, "all_squash")
self.assertEqual(export.clients[1].access_type, "RO")
self.assertEqual(export.cluster_id, '_default_')
self.assertEqual(export.attr_expiration_time, 0)
self.assertEqual(export.security_label, False)
def test_export_parser_2(self):
blocks = GaneshaConfParser(self.export_2).parse()
self.assertIsInstance(blocks, list)
self.assertEqual(len(blocks), 1)
export = Export.from_export_block(blocks[0], '_default_',
GaneshaConf.ganesha_defaults({}))
self.assertEqual(export.export_id, 2)
self.assertEqual(export.path, "/")
self.assertEqual(export.pseudo, "/rgw")
self.assertIsNone(export.tag)
self.assertEqual(export.access_type, "RW")
self.assertEqual(export.squash, "all_squash")
self.assertEqual(export.protocols, {4, 3})
self.assertEqual(export.transports, {"TCP", "UDP"})
self.assertEqual(export.fsal.name, "RGW")
self.assertEqual(export.fsal.rgw_user_id, "testuser")
self.assertEqual(export.fsal.access_key, "access_key")
self.assertEqual(export.fsal.secret_key, "secret_key")
self.assertEqual(len(export.clients), 0)
self.assertEqual(export.cluster_id, '_default_')
def test_daemon_conf_parser_a(self):
blocks = GaneshaConfParser(self.conf_nodea).parse()
self.assertIsInstance(blocks, list)
self.assertEqual(len(blocks), 2)
self.assertEqual(blocks[0]['block_name'], "%url")
self.assertEqual(blocks[0]['value'], "rados://nfs-ganesha/bar/export-2")
self.assertEqual(blocks[1]['block_name'], "%url")
self.assertEqual(blocks[1]['value'], "rados://nfs-ganesha/bar/export-1")
def test_daemon_conf_parser_b(self):
blocks = GaneshaConfParser(self.conf_nodeb).parse()
self.assertIsInstance(blocks, list)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0]['block_name'], "%url")
self.assertEqual(blocks[0]['value'], "rados://nfs-ganesha/bar/export-1")
def test_ganesha_conf(self):
for cluster_id, info in self.clusters.items():
self._do_test_ganesha_conf(cluster_id, info['exports'])
self._reset_temp_store()
def _do_test_ganesha_conf(self, cluster, expected_exports):
ganesha_conf = GaneshaConf.instance(cluster)
exports = ganesha_conf.exports
self.assertEqual(len(exports.items()), 2)
self.assertIn(1, exports)
self.assertIn(2, exports)
# export_id = 1 asserts
export = exports[1]
self.assertEqual(export.export_id, 1)
self.assertEqual(export.path, "/")
self.assertEqual(export.pseudo, "/cephfs_a")
self.assertIsNone(export.tag)
self.assertEqual(export.access_type, "RW")
self.assertEqual(export.squash, "root_squash")
self.assertEqual(export.protocols, {4})
self.assertEqual(export.transports, {"TCP", "UDP"})
self.assertEqual(export.fsal.name, "CEPH")
self.assertEqual(export.fsal.user_id, "ganesha")
self.assertEqual(export.fsal.fs_name, "a")
self.assertEqual(export.fsal.sec_label_xattr, None)
self.assertEqual(len(export.clients), 2)
self.assertEqual(export.clients[0].addresses,
["192.168.0.10", "192.168.1.0/8"])
self.assertEqual(export.clients[0].squash, "no_root_squash")
self.assertIsNone(export.clients[0].access_type)
self.assertEqual(export.clients[1].addresses, ["192.168.0.0/16"])
self.assertEqual(export.clients[1].squash, "all_squash")
self.assertEqual(export.clients[1].access_type, "RO")
self.assertEqual(export.attr_expiration_time, 0)
self.assertEqual(export.security_label, False)
self.assertSetEqual(export.daemons, set(expected_exports[1]))
# export_id = 2 asserts
export = exports[2]
self.assertEqual(export.export_id, 2)
self.assertEqual(export.path, "/")
self.assertEqual(export.pseudo, "/rgw")
self.assertIsNone(export.tag)
self.assertEqual(export.access_type, "RW")
self.assertEqual(export.squash, "all_squash")
self.assertEqual(export.protocols, {4, 3})
self.assertEqual(export.transports, {"TCP", "UDP"})
self.assertEqual(export.fsal.name, "RGW")
self.assertEqual(export.fsal.rgw_user_id, "testuser")
self.assertEqual(export.fsal.access_key, "access_key")
self.assertEqual(export.fsal.secret_key, "secret_key")
self.assertEqual(len(export.clients), 0)
self.assertSetEqual(export.daemons, set(expected_exports[2]))
def test_config_dict(self):
for cluster_id, info in self.clusters.items():
self._do_test_config_dict(cluster_id, info['exports'])
self._reset_temp_store()
def _do_test_config_dict(self, cluster, expected_exports):
conf = GaneshaConf.instance(cluster)
export = conf.exports[1]
ex_dict = export.to_dict()
self.assertDictEqual(ex_dict, {
'daemons': expected_exports[1],
'export_id': 1,
'path': '/',
'pseudo': '/cephfs_a',
'cluster_id': cluster,
'tag': None,
'access_type': 'RW',
'squash': 'root_squash',
'security_label': False,
'protocols': [4],
'transports': ['TCP', 'UDP'],
'clients': [{
'addresses': ["192.168.0.10", "192.168.1.0/8"],
'access_type': None,
'squash': 'no_root_squash'
}, {
'addresses': ["192.168.0.0/16"],
'access_type': 'RO',
'squash': 'all_squash'
}],
'fsal': {
'name': 'CEPH',
'user_id': 'ganesha',
'fs_name': 'a',
'sec_label_xattr': None
}
})
export = conf.exports[2]
ex_dict = export.to_dict()
self.assertDictEqual(ex_dict, {
'daemons': expected_exports[2],
'export_id': 2,
'path': '/',
'pseudo': '/rgw',
'cluster_id': cluster,
'tag': None,
'access_type': 'RW',
'squash': 'all_squash',
'security_label': False,
'protocols': [3, 4],
'transports': ['TCP', 'UDP'],
'clients': [],
'fsal': {
'name': 'RGW',
'rgw_user_id': 'testuser'
}
})
def test_config_from_dict(self):
for cluster_id, info in self.clusters.items():
self._do_test_config_from_dict(cluster_id, info['exports'])
self._reset_temp_store()
def _do_test_config_from_dict(self, cluster_id, expected_exports):
export = Export.from_dict(1, {
'daemons': expected_exports[1],
'export_id': 1,
'path': '/',
'cluster_id': cluster_id,
'pseudo': '/cephfs_a',
'tag': None,
'access_type': 'RW',
'squash': 'root_squash',
'security_label': True,
'protocols': [4],
'transports': ['TCP', 'UDP'],
'clients': [{
'addresses': ["192.168.0.10", "192.168.1.0/8"],
'access_type': None,
'squash': 'no_root_squash'
}, {
'addresses': ["192.168.0.0/16"],
'access_type': 'RO',
'squash': 'all_squash'
}],
'fsal': {
'name': 'CEPH',
'user_id': 'ganesha',
'fs_name': 'a',
'sec_label_xattr': 'security.selinux'
}
})
self.assertEqual(export.export_id, 1)
self.assertEqual(export.path, "/")
self.assertEqual(export.pseudo, "/cephfs_a")
self.assertIsNone(export.tag)
self.assertEqual(export.access_type, "RW")
self.assertEqual(export.squash, "root_squash")
self.assertEqual(export.protocols, {4})
self.assertEqual(export.transports, {"TCP", "UDP"})
self.assertEqual(export.fsal.name, "CEPH")
self.assertEqual(export.fsal.user_id, "ganesha")
self.assertEqual(export.fsal.fs_name, "a")
self.assertEqual(export.fsal.sec_label_xattr, 'security.selinux')
self.assertEqual(len(export.clients), 2)
self.assertEqual(export.clients[0].addresses,
["192.168.0.10", "192.168.1.0/8"])
self.assertEqual(export.clients[0].squash, "no_root_squash")
self.assertIsNone(export.clients[0].access_type)
self.assertEqual(export.clients[1].addresses, ["192.168.0.0/16"])
self.assertEqual(export.clients[1].squash, "all_squash")
self.assertEqual(export.clients[1].access_type, "RO")
self.assertEqual(export.daemons, set(expected_exports[1]))
self.assertEqual(export.cluster_id, cluster_id)
self.assertEqual(export.attr_expiration_time, 0)
self.assertEqual(export.security_label, True)
export = Export.from_dict(2, {
'daemons': expected_exports[2],
'export_id': 2,
'path': '/',
'pseudo': '/rgw',
'cluster_id': cluster_id,
'tag': None,
'access_type': 'RW',
'squash': 'all_squash',
'security_label': False,
'protocols': [4, 3],
'transports': ['TCP', 'UDP'],
'clients': [],
'fsal': {
'name': 'RGW',
'rgw_user_id': 'testuser'
}
})
self.assertEqual(export.export_id, 2)
self.assertEqual(export.path, "/")
self.assertEqual(export.pseudo, "/rgw")
self.assertIsNone(export.tag)
self.assertEqual(export.access_type, "RW")
self.assertEqual(export.squash, "all_squash")
self.assertEqual(export.protocols, {4, 3})
self.assertEqual(export.transports, {"TCP", "UDP"})
self.assertEqual(export.fsal.name, "RGW")
self.assertEqual(export.fsal.rgw_user_id, "testuser")
self.assertIsNone(export.fsal.access_key)
self.assertIsNone(export.fsal.secret_key)
self.assertEqual(len(export.clients), 0)
self.assertEqual(export.daemons, set(expected_exports[2]))
self.assertEqual(export.cluster_id, cluster_id)
def test_gen_raw_config(self):
for cluster_id, info in self.clusters.items():
self._do_test_gen_raw_config(cluster_id, info['exports'])
self._reset_temp_store()
def _do_test_gen_raw_config(self, cluster_id, expected_exports):
conf = GaneshaConf.instance(cluster_id)
# pylint: disable=W0212
export = conf.exports[1]
del conf.exports[1]
conf._save_export(export)
conf = GaneshaConf.instance(cluster_id)
exports = conf.exports
self.assertEqual(len(exports.items()), 2)
self.assertIn(1, exports)
self.assertIn(2, exports)
# export_id = 1 asserts
export = exports[1]
self.assertEqual(export.export_id, 1)
self.assertEqual(export.path, "/")
self.assertEqual(export.pseudo, "/cephfs_a")
self.assertIsNone(export.tag)
self.assertEqual(export.access_type, "RW")
self.assertEqual(export.squash, "root_squash")
self.assertEqual(export.protocols, {4})
self.assertEqual(export.transports, {"TCP", "UDP"})
self.assertEqual(export.fsal.name, "CEPH")
self.assertEqual(export.fsal.user_id, "ganesha")
self.assertEqual(export.fsal.fs_name, "a")
self.assertEqual(export.fsal.sec_label_xattr, None)
self.assertEqual(len(export.clients), 2)
self.assertEqual(export.clients[0].addresses,
["192.168.0.10", "192.168.1.0/8"])
self.assertEqual(export.clients[0].squash, "no_root_squash")
self.assertIsNone(export.clients[0].access_type)
self.assertEqual(export.clients[1].addresses, ["192.168.0.0/16"])
self.assertEqual(export.clients[1].squash, "all_squash")
self.assertEqual(export.clients[1].access_type, "RO")
self.assertEqual(export.daemons, set(expected_exports[1]))
self.assertEqual(export.cluster_id, cluster_id)
self.assertEqual(export.attr_expiration_time, 0)
self.assertEqual(export.security_label, False)
# export_id = 2 asserts
export = exports[2]
self.assertEqual(export.export_id, 2)
self.assertEqual(export.path, "/")
self.assertEqual(export.pseudo, "/rgw")
self.assertIsNone(export.tag)
self.assertEqual(export.access_type, "RW")
self.assertEqual(export.squash, "all_squash")
self.assertEqual(export.protocols, {4, 3})
self.assertEqual(export.transports, {"TCP", "UDP"})
self.assertEqual(export.fsal.name, "RGW")
self.assertEqual(export.fsal.rgw_user_id, "testuser")
self.assertEqual(export.fsal.access_key, "access_key")
self.assertEqual(export.fsal.secret_key, "secret_key")
self.assertEqual(len(export.clients), 0)
self.assertEqual(export.daemons, set(expected_exports[2]))
self.assertEqual(export.cluster_id, cluster_id)
def test_update_export(self):
for cluster_id, info in self.clusters.items():
self._do_test_update_export(cluster_id, info['exports'])
self._reset_temp_store()
def _do_test_update_export(self, cluster_id, expected_exports):
ganesha.RgwClient = MagicMock()
admin_inst_mock = MagicMock()
admin_inst_mock.get_user_keys.return_value = {
'access_key': 'access_key',
'secret_key': 'secret_key'
}
ganesha.RgwClient.admin_instance.return_value = admin_inst_mock
conf = GaneshaConf.instance(cluster_id)
conf.update_export({
'export_id': 2,
'daemons': expected_exports[2],
'path': 'bucket',
'pseudo': '/rgw/bucket',
'cluster_id': cluster_id,
'tag': 'bucket_tag',
'access_type': 'RW',
'squash': 'all_squash',
'security_label': False,
'protocols': [4, 3],
'transports': ['TCP', 'UDP'],
'clients': [{
'addresses': ["192.168.0.0/16"],
'access_type': None,
'squash': None
}],
'fsal': {
'name': 'RGW',
'rgw_user_id': 'testuser'
}
})
conf = GaneshaConf.instance(cluster_id)
export = conf.get_export(2)
self.assertEqual(export.export_id, 2)
self.assertEqual(export.path, "bucket")
self.assertEqual(export.pseudo, "/rgw/bucket")
self.assertEqual(export.tag, "bucket_tag")
self.assertEqual(export.access_type, "RW")
self.assertEqual(export.squash, "all_squash")
self.assertEqual(export.protocols, {4, 3})
self.assertEqual(export.transports, {"TCP", "UDP"})
self.assertEqual(export.fsal.name, "RGW")
self.assertEqual(export.fsal.rgw_user_id, "testuser")
self.assertEqual(export.fsal.access_key, "access_key")
self.assertEqual(export.fsal.secret_key, "secret_key")
self.assertEqual(len(export.clients), 1)
self.assertEqual(export.clients[0].addresses, ["192.168.0.0/16"])
self.assertIsNone(export.clients[0].squash)
self.assertIsNone(export.clients[0].access_type)
self.assertEqual(export.daemons, set(expected_exports[2]))
self.assertEqual(export.cluster_id, cluster_id)
def test_remove_export(self):
for cluster_id, info in self.clusters.items():
self._do_test_remove_export(cluster_id, info['exports'])
self._reset_temp_store()
def _do_test_remove_export(self, cluster_id, expected_exports):
conf = GaneshaConf.instance(cluster_id)
conf.remove_export(1)
exports = conf.list_exports()
self.assertEqual(len(exports), 1)
self.assertEqual(2, exports[0].export_id)
export = conf.get_export(2)
self.assertEqual(export.export_id, 2)
self.assertEqual(export.path, "/")
self.assertEqual(export.pseudo, "/rgw")
self.assertIsNone(export.tag)
self.assertEqual(export.access_type, "RW")
self.assertEqual(export.squash, "all_squash")
self.assertEqual(export.protocols, {4, 3})
self.assertEqual(export.transports, {"TCP", "UDP"})
self.assertEqual(export.fsal.name, "RGW")
self.assertEqual(export.fsal.rgw_user_id, "testuser")
self.assertEqual(export.fsal.access_key, "access_key")
self.assertEqual(export.fsal.secret_key, "secret_key")
self.assertEqual(len(export.clients), 0)
self.assertEqual(export.daemons, set(expected_exports[2]))
self.assertEqual(export.cluster_id, cluster_id)
def test_create_export_rgw(self):
for cluster_id, info in self.clusters.items():
self._do_test_create_export_rgw(cluster_id, info['exports'])
self._reset_temp_store()
def _do_test_create_export_rgw(self, cluster_id, expected_exports):
ganesha.RgwClient = MagicMock()
admin_inst_mock = MagicMock()
admin_inst_mock.get_user_keys.return_value = {
'access_key': 'access_key2',
'secret_key': 'secret_key2'
}
ganesha.RgwClient.admin_instance.return_value = admin_inst_mock
conf = GaneshaConf.instance(cluster_id)
ex_id = conf.create_export({
'daemons': expected_exports[3],
'path': 'bucket',
'pseudo': '/rgw/bucket',
'tag': 'bucket_tag',
'cluster_id': cluster_id,
'access_type': 'RW',
'squash': 'all_squash',
'security_label': False,
'protocols': [4, 3],
'transports': ['TCP', 'UDP'],
'clients': [{
'addresses': ["192.168.0.0/16"],
'access_type': None,
'squash': None
}],
'fsal': {
'name': 'RGW',
'rgw_user_id': 'testuser'
}
})
conf = GaneshaConf.instance(cluster_id)
exports = conf.list_exports()
self.assertEqual(len(exports), 3)
export = conf.get_export(ex_id)
self.assertEqual(export.export_id, ex_id)
self.assertEqual(export.path, "bucket")
self.assertEqual(export.pseudo, "/rgw/bucket")
self.assertEqual(export.tag, "bucket_tag")
self.assertEqual(export.access_type, "RW")
self.assertEqual(export.squash, "all_squash")
self.assertEqual(export.protocols, {4, 3})
self.assertEqual(export.transports, {"TCP", "UDP"})
self.assertEqual(export.fsal.name, "RGW")
self.assertEqual(export.fsal.rgw_user_id, "testuser")
self.assertEqual(export.fsal.access_key, "access_key2")
self.assertEqual(export.fsal.secret_key, "secret_key2")
self.assertEqual(len(export.clients), 1)
self.assertEqual(export.clients[0].addresses, ["192.168.0.0/16"])
self.assertIsNone(export.clients[0].squash)
self.assertIsNone(export.clients[0].access_type)
self.assertEqual(export.daemons, set(expected_exports[3]))
self.assertEqual(export.cluster_id, cluster_id)
def test_create_export_cephfs(self):
for cluster_id, info in self.clusters.items():
self._do_test_create_export_cephfs(cluster_id, info['exports'])
self._reset_temp_store()
def _do_test_create_export_cephfs(self, cluster_id, expected_exports):
ganesha.CephX = MagicMock()
ganesha.CephX.list_clients.return_value = ["fs"]
ganesha.CephX.get_client_key.return_value = "fs_key"
ganesha.CephFS = MagicMock()
ganesha.CephFS.dir_exists.return_value = True
conf = GaneshaConf.instance(cluster_id)
ex_id = conf.create_export({
'daemons': expected_exports[3],
'path': '/',
'pseudo': '/cephfs2',
'cluster_id': cluster_id,
'tag': None,
'access_type': 'RW',
'squash': 'all_squash',
'security_label': True,
'protocols': [4],
'transports': ['TCP'],
'clients': [],
'fsal': {
'name': 'CEPH',
'user_id': 'fs',
'fs_name': None,
'sec_label_xattr': 'security.selinux'
}
})
conf = GaneshaConf.instance(cluster_id)
exports = conf.list_exports()
self.assertEqual(len(exports), 3)
export = conf.get_export(ex_id)
self.assertEqual(export.export_id, ex_id)
self.assertEqual(export.path, "/")
self.assertEqual(export.pseudo, "/cephfs2")
self.assertIsNone(export.tag)
self.assertEqual(export.access_type, "RW")
self.assertEqual(export.squash, "all_squash")
self.assertEqual(export.protocols, {4})
self.assertEqual(export.transports, {"TCP"})
self.assertEqual(export.fsal.name, "CEPH")
self.assertEqual(export.fsal.user_id, "fs")
self.assertEqual(export.fsal.cephx_key, "fs_key")
self.assertEqual(export.fsal.sec_label_xattr, "security.selinux")
self.assertIsNone(export.fsal.fs_name)
self.assertEqual(len(export.clients), 0)
self.assertEqual(export.daemons, set(expected_exports[3]))
self.assertEqual(export.cluster_id, cluster_id)
self.assertEqual(export.attr_expiration_time, 0)
self.assertEqual(export.security_label, True)
def test_reload_daemons(self):
# Fail to import call in Python 3.8, see https://bugs.python.org/issue35753
mock_call = unittest.mock.call
# Orchestrator cluster: reload all daemon config objects.
conf = GaneshaConf.instance('foo')
calls = [mock_call(conf) for conf in conf.list_daemon_confs()]
for daemons in [[], ['a', 'b']]:
conf.reload_daemons(daemons)
self.io_mock.notify.assert_has_calls(calls)
self.io_mock.reset_mock()
# User-defined cluster: reload daemons in the parameter
self._set_user_defined_clusters_location()
conf = GaneshaConf.instance('_default_')
calls = [mock_call('conf-{}'.format(daemon)) for daemon in ['nodea', 'nodeb']]
conf.reload_daemons(['nodea', 'nodeb'])
self.io_mock.notify.assert_has_calls(calls)
def test_list_daemons(self):
for cluster_id, info in self.clusters.items():
instance = GaneshaConf.instance(cluster_id)
daemons = instance.list_daemons()
for daemon in daemons:
self.assertEqual(daemon['cluster_id'], cluster_id)
self.assertEqual(daemon['cluster_type'], info['type'])
self.assertIn('daemon_id', daemon)
self.assertIn('status', daemon)
self.assertIn('status_desc', daemon)
self.assertEqual([daemon['daemon_id'] for daemon in daemons], info['daemons'])
def test_validate_orchestrator(self):
cluster_id = 'foo'
cluster_info = self.clusters[cluster_id]
instance = GaneshaConf.instance(cluster_id)
export = MagicMock()
# export can be linked to none or all daemons
export_daemons = [[], cluster_info['daemons']]
for daemons in export_daemons:
export.daemons = daemons
instance.validate(export)
# raise if linking to partial or non-exist daemons
export_daemons = [cluster_info['daemons'][:1], 'xxx']
for daemons in export_daemons:
with self.assertRaises(NFSException):
export.daemons = daemons
instance.validate(export)
def test_validate_user(self):
self._set_user_defined_clusters_location()
cluster_id = '_default_'
instance = GaneshaConf.instance(cluster_id)
export = MagicMock()
# export can be linked to none, partial, or all daemons
fake_daemons = ['nodea', 'nodeb']
export_daemons = [[], fake_daemons[:1], fake_daemons]
for daemons in export_daemons:
export.daemons = daemons
instance.validate(export)
# raise if linking to non-exist daemons
export_daemons = ['xxx']
for daemons in export_daemons:
with self.assertRaises(NFSException):
export.daemons = daemons
instance.validate(export)
def _verify_locations(self, locations, cluster_ids):
for cluster_id in cluster_ids:
self.assertIn(cluster_id, locations)
cluster = locations.pop(cluster_id)
self.assertDictEqual(cluster, {key: cluster[key] for key in [
'pool', 'namespace', 'type', 'daemon_conf']})
self.assertDictEqual(locations, {})
def test_get_cluster_locations(self):
# pylint: disable=protected-access
# There is only a Orchestrator cluster.
self._mock_orchestrator(True)
locations = ganesha.Ganesha._get_clusters_locations()
self._verify_locations(locations, ['foo'])
# No cluster.
self._mock_orchestrator(False)
with self.assertRaises(NFSException):
ganesha.Ganesha._get_clusters_locations()
# There is only a user-defined cluster.
self._set_user_defined_clusters_location()
self._mock_orchestrator(False)
locations = ganesha.Ganesha._get_clusters_locations()
self._verify_locations(locations, ['_default_'])
# There are both Orchestrator cluster and user-defined cluster.
self._set_user_defined_clusters_location()
self._mock_orchestrator(True)
locations = ganesha.Ganesha._get_clusters_locations()
self._verify_locations(locations, ['foo', '_default_'])
def test_get_cluster_locations_conflict(self):
# pylint: disable=protected-access
# Pool/namespace collision.
self._set_user_defined_clusters_location('nfs-ganesha/foo')
with self.assertRaises(NFSException) as ctx:
ganesha.Ganesha._get_clusters_locations()
self.assertIn('already in use', str(ctx.exception))
# Cluster name collision with orch. cluster.
self._set_user_defined_clusters_location('foo:nfs-ganesha/bar')
with self.assertRaises(NFSException) as ctx:
ganesha.Ganesha._get_clusters_locations()
self.assertIn('Detected a conflicting NFS-Ganesha cluster', str(ctx.exception))
# Cluster name collision with user-defined cluster.
self._set_user_defined_clusters_location(
'cluster1:nfs-ganesha/bar,cluster1:fake-pool/fake-ns'
)
with self.assertRaises(NFSException) as ctx:
ganesha.Ganesha._get_clusters_locations()
self.assertIn('Duplicate Ganesha cluster definition', str(ctx.exception))
class NFSGaneshaUiControllerTest(ControllerTestCase):

src/pybind/mgr/nfs/cluster.py

@@ -149,6 +149,20 @@ class NFSCluster:
except Exception as e:
return exception_handler(e, "Failed to list NFS Cluster")
def list_daemons(self):
completion = self.mgr.list_daemons(daemon_type='nfs')
# Here completion.result is a list of DaemonDescription objects
daemons = orchestrator.raise_if_exception(completion)
return [
{
'cluster_id': instance.service_id(),
'daemon_id': instance.daemon_id,
'cluster_type': 'orchestrator',
'status': instance.status,
'status_desc': instance.status_desc
} for instance in daemons
]
def _show_nfs_cluster_info(self, cluster_id: str) -> Dict[str, Any]:
completion = self.mgr.list_daemons(daemon_type='nfs')
# Here completion.result is a list of DaemonDescription objects

src/pybind/mgr/nfs/export.py

@@ -172,6 +172,20 @@ class ExportMgr:
log.info('no exports for cluster %s', cluster_id)
return None
def _fetch_export_id(
self,
cluster_id: str,
export_id: int
) -> Optional[Export]:
try:
for ex in self.exports[cluster_id]:
if ex.export_id == export_id:
return ex
return None
except KeyError:
log.info(f'no exports for cluster {cluster_id}')
return None
def _delete_export_user(self, export: Export) -> None:
if isinstance(export.fsal, CephFSFSAL):
assert export.fsal.user_id
@@ -364,6 +378,12 @@ class ExportMgr:
raise NFSException(f"Failed to delete exports: {err} and {ret}")
log.info("All exports successfully deleted for cluster id: %s", cluster_id)
def list_all_exports(self) -> List[Dict[Any, Any]]:
r = []
for ls in self.exports.values():
r.extend([e.to_dict() for e in ls])
return r
@export_cluster_checker
def list_exports(self,
cluster_id: str,
@@ -397,6 +417,14 @@ class ExportMgr:
except Exception as e:
return exception_handler(e, f"Failed to get {pseudo_path} export for {cluster_id}")
def get_export_by_id(
self,
cluster_id: str,
export_id: int
) -> Optional[Dict[Any, Any]]:
export = self._fetch_export_id(cluster_id, export_id)
return export.to_dict() if export else None
def apply_export(self, cluster_id: str, export_config: str) -> Tuple[int, str, str]:
try:
if not export_config:
@@ -420,9 +448,9 @@ class ExportMgr:
ret, out, err = (0, '', '')
for export in j:
try:
r, o, e = self._apply_export(cluster_id, export)
r, o, e, ex = self._apply_export(cluster_id, export)
except Exception as ex:
r, o, e = exception_handler(ex, f'Failed to apply export: {ex}')
r, o, e = exception_handler(ex, f'Failed to apply export: {ex}')
if r:
ret = r
if o:
@@ -431,7 +459,8 @@ class ExportMgr:
err += e + '\n'
return ret, out, err
else:
return self._apply_export(cluster_id, j)
r, o, e, ex = self._apply_export(cluster_id, j)
return r, o, e
except NotImplementedError:
return 0, " Manual Restart of NFS PODS required for successful update of exports", ""
except Exception as e:
@@ -623,7 +652,7 @@ class ExportMgr:
self,
cluster_id: str,
new_export_dict: Dict,
) -> Tuple[int, str, str]:
) -> Tuple[int, str, str, Export]:
for k in ['path', 'pseudo']:
if k not in new_export_dict:
raise NFSInvalidOperation(f'Export missing required field {k}')
@@ -661,7 +690,7 @@ class ExportMgr:
if not old_export:
self._create_export_user(new_export)
self._save_export(cluster_id, new_export)
return 0, f'Added export {new_export.pseudo}', ''
return 0, f'Added export {new_export.pseudo}', '', new_export
if old_export.fsal.name != new_export.fsal.name:
raise NFSInvalidOperation('FSAL change not allowed')
@@ -706,4 +735,4 @@ class ExportMgr:
# TODO: detect whether the update is such that a reload is sufficient
restart_nfs_service(self.mgr, new_export.cluster_id)
return 0, f"Updated export {new_export.pseudo}", ""
return 0, f"Updated export {new_export.pseudo}", "", new_export

src/pybind/mgr/nfs/module.py

@@ -1,13 +1,13 @@
import logging
import threading
from typing import Tuple, Optional, List
from typing import Tuple, Optional, List, Dict, Any
from mgr_module import MgrModule, CLICommand, Option, CLICheckNonemptyFileInput
import orchestrator
from .export import ExportMgr
from .cluster import NFSCluster
from typing import Any
from .utils import available_clusters
log = logging.getLogger(__name__)
@@ -130,3 +130,37 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
def _cmd_nfs_cluster_config_reset(self, cluster_id: str) -> Tuple[int, str, str]:
"""Reset NFS-Ganesha Config to default"""
return self.nfs.reset_nfs_cluster_config(cluster_id=cluster_id)
def is_active(self) -> bool:
return True
def export_ls(self) -> List[Dict[Any, Any]]:
return self.export_mgr.list_all_exports()
def export_get(self, cluster_id: str, export_id: int) -> Optional[Dict[Any, Any]]:
return self.export_mgr.get_export_by_id(cluster_id, export_id)
def export_rm(self, cluster_id: str, pseudo: str) -> None:
self.export_mgr.delete_export(cluster_id=cluster_id, pseudo_path=pseudo)
def daemon_ls(self) -> List[Dict[Any, Any]]:
return self.nfs.list_daemons()
def cluster_ls(self) -> List[Dict[str, Any]]:
return [
{
'pool': NFS_POOL_NAME,
'namespace': cluster_id,
'type': 'orchestrator',
'daemon_conf': None,
} for cluster_id in available_clusters()
]
def cluster_fsals(self) -> List[str]:
return ['CEPH', 'RGW']
def export_apply(self, cluster_id: str, export: Dict[Any, Any]) -> Optional[Dict[Any, Any]]:
ret, out, err, export = self.export_mgr._apply_export(cluster_id, export)
if ret:
return None
return export.to_dict()