2020-09-16 16:39:17 +00:00
|
|
|
# NOTE: these tests are not yet compatible with vstart_runner.py.
|
2020-06-16 08:45:22 +00:00
|
|
|
import errno
|
2020-06-02 11:34:19 +00:00
|
|
|
import json
|
2020-04-30 10:10:52 +00:00
|
|
|
import time
|
|
|
|
import logging
|
|
|
|
from io import BytesIO
|
|
|
|
|
|
|
|
from tasks.mgr.mgr_test_case import MgrTestCase
|
2020-06-02 11:34:19 +00:00
|
|
|
from teuthology.exceptions import CommandFailedError
|
2020-04-30 10:10:52 +00:00
|
|
|
|
|
|
|
# Module-level logger for this test module.
log = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
|
2020-06-25 06:53:39 +00:00
|
|
|
# TODO Add test for cluster update when ganesha can be deployed on multiple ports.
|
2020-04-30 10:10:52 +00:00
|
|
|
class TestNFS(MgrTestCase):
|
2020-08-17 07:20:36 +00:00
|
|
|
def _cmd(self, *args):
|
2020-05-28 10:57:25 +00:00
|
|
|
return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)
|
|
|
|
|
2020-04-30 10:10:52 +00:00
|
|
|
def _nfs_cmd(self, *args):
|
2020-05-28 10:57:25 +00:00
|
|
|
return self._cmd("nfs", *args)
|
2020-04-30 10:10:52 +00:00
|
|
|
|
|
|
|
def _orch_cmd(self, *args):
|
2020-05-28 10:57:25 +00:00
|
|
|
return self._cmd("orch", *args)
|
2020-04-30 10:10:52 +00:00
|
|
|
|
|
|
|
def _sys_cmd(self, cmd):
|
|
|
|
cmd[0:0] = ['sudo']
|
|
|
|
ret = self.ctx.cluster.run(args=cmd, check_status=False, stdout=BytesIO(), stderr=BytesIO())
|
|
|
|
stdout = ret[0].stdout
|
|
|
|
if stdout:
|
|
|
|
return stdout.getvalue()
|
|
|
|
|
|
|
|
    def setUp(self):
        '''
        Common fixtures: cluster/export identifiers and the export dict that
        detailed export listing and `export get` are compared against.
        '''
        super(TestNFS, self).setUp()
        self.cluster_id = "test"
        self.export_type = "cephfs"
        self.pseudo_path = "/cephfs"
        self.path = "/"
        self.fs_name = "nfs-cephfs"
        # Service/daemon name the nfs module derives from the cluster id
        self.expected_name = "nfs.ganesha-test"
        # Expected JSON for export 1; _test_list_detailed mutates this dict
        # in place to match exports 2-4.
        self.sample_export = {
            "export_id": 1,
            "path": self.path,
            "cluster_id": self.cluster_id,
            "pseudo": self.pseudo_path,
            "access_type": "RW",
            "squash": "no_root_squash",
            "security_label": True,
            "protocols": [
                4
            ],
            "transports": [
                "TCP"
            ],
            "fsal": {
                "name": "CEPH",
                "user_id": "test1",
                "fs_name": self.fs_name,
                "sec_label_xattr": ''
            },
            "clients": []
        }
|
2020-04-30 10:10:52 +00:00
|
|
|
|
|
|
|
def _check_nfs_server_status(self):
|
|
|
|
res = self._sys_cmd(['systemctl', 'status', 'nfs-server'])
|
|
|
|
if isinstance(res, bytes) and b'Active: active' in res:
|
|
|
|
self._disable_nfs()
|
|
|
|
|
|
|
|
def _disable_nfs(self):
|
|
|
|
log.info("Disabling NFS")
|
|
|
|
self._sys_cmd(['systemctl', 'disable', 'nfs-server', '--now'])
|
|
|
|
|
2020-08-17 07:42:11 +00:00
|
|
|
def _fetch_nfs_status(self):
|
|
|
|
return self._orch_cmd('ps', f'--service_name={self.expected_name}')
|
|
|
|
|
|
|
|
def _check_nfs_cluster_status(self, expected_status, fail_msg):
|
|
|
|
'''
|
|
|
|
Tests if nfs cluster created or deleted successfully
|
|
|
|
:param expected_status: Status to be verified
|
|
|
|
:param fail_msg: Message to be printed if test failed
|
|
|
|
'''
|
|
|
|
# Wait for few seconds as ganesha daemon takes few seconds to be deleted/created
|
|
|
|
wait_time = 10
|
|
|
|
while wait_time <= 60:
|
|
|
|
time.sleep(wait_time)
|
|
|
|
if expected_status in self._fetch_nfs_status():
|
|
|
|
return
|
|
|
|
wait_time += 10
|
|
|
|
self.fail(fail_msg)
|
2020-04-30 10:10:52 +00:00
|
|
|
|
2020-06-23 18:17:43 +00:00
|
|
|
def _check_auth_ls(self, export_id=1, check_in=False):
|
|
|
|
'''
|
|
|
|
Tests export user id creation or deletion.
|
|
|
|
:param export_id: Denotes export number
|
|
|
|
:param check_in: Check specified export id
|
|
|
|
'''
|
|
|
|
output = self._cmd('auth', 'ls')
|
|
|
|
if check_in:
|
|
|
|
self.assertIn(f'client.{self.cluster_id}{export_id}', output)
|
|
|
|
else:
|
|
|
|
self.assertNotIn(f'client-{self.cluster_id}', output)
|
|
|
|
|
2020-06-02 11:34:19 +00:00
|
|
|
def _test_idempotency(self, cmd_func, cmd_args):
|
2020-06-19 17:36:12 +00:00
|
|
|
'''
|
|
|
|
Test idempotency of commands. It first runs the TestNFS test method
|
|
|
|
for a command and then checks the result of command run again. TestNFS
|
|
|
|
test method has required checks to verify that command works.
|
|
|
|
:param cmd_func: TestNFS method
|
|
|
|
:param cmd_args: nfs command arguments to be run
|
|
|
|
'''
|
2020-06-02 11:34:19 +00:00
|
|
|
cmd_func()
|
|
|
|
ret = self.mgr_cluster.mon_manager.raw_cluster_cmd_result(*cmd_args)
|
|
|
|
if ret != 0:
|
2020-06-19 17:36:12 +00:00
|
|
|
self.fail("Idempotency test failed")
|
2020-06-02 11:34:19 +00:00
|
|
|
|
2020-05-28 10:57:25 +00:00
|
|
|
    def _test_create_cluster(self):
        '''
        Test single nfs cluster deployment.
        '''
        # Disable any running nfs ganesha daemon
        self._check_nfs_server_status()
        self._nfs_cmd('cluster', 'create', self.export_type, self.cluster_id)
        # Check for expected status and daemon name (nfs.ganesha-<cluster_id>)
        self._check_nfs_cluster_status('running', 'NFS Ganesha cluster deployment failed')
|
2020-04-30 10:10:52 +00:00
|
|
|
|
2020-05-28 10:57:25 +00:00
|
|
|
    def _test_delete_cluster(self):
        '''
        Test deletion of a single nfs cluster.
        '''
        self._nfs_cmd('cluster', 'delete', self.cluster_id)
        # Daemon removal is asynchronous; poll until no daemons are reported
        self._check_nfs_cluster_status('No daemons reported',
                                       'NFS Ganesha cluster could not be deleted')
|
2020-04-30 10:10:52 +00:00
|
|
|
|
2020-06-16 08:45:22 +00:00
|
|
|
def _test_list_cluster(self, empty=False):
|
2020-06-19 17:36:12 +00:00
|
|
|
'''
|
|
|
|
Test listing of deployed nfs clusters. If nfs cluster is deployed then
|
|
|
|
it checks for expected cluster id. Otherwise checks nothing is listed.
|
|
|
|
:param empty: If true it denotes no cluster is deployed.
|
|
|
|
'''
|
2020-06-16 08:45:22 +00:00
|
|
|
if empty:
|
|
|
|
cluster_id = ''
|
|
|
|
else:
|
|
|
|
cluster_id = self.cluster_id
|
2020-06-11 05:27:50 +00:00
|
|
|
nfs_output = self._nfs_cmd('cluster', 'ls')
|
2020-06-16 08:45:22 +00:00
|
|
|
self.assertEqual(cluster_id, nfs_output.strip())
|
2020-06-11 05:27:50 +00:00
|
|
|
|
2020-05-28 10:57:25 +00:00
|
|
|
    def _create_export(self, export_id, create_fs=False, extra_cmd=None):
        '''
        Test creation of a single export.
        :param export_id: Denotes export number
        :param create_fs: If false filesytem exists. Otherwise create it.
        :param extra_cmd: List of extra arguments for creating export.
        '''
        if create_fs:
            self._cmd('fs', 'volume', 'create', self.fs_name)
        export_cmd = ['nfs', 'export', 'create', 'cephfs', self.fs_name, self.cluster_id]
        # Extra args (pseudo path, --readonly, subvolume path) replace the
        # default pseudo path when supplied as a list.
        if isinstance(extra_cmd, list):
            export_cmd.extend(extra_cmd)
        else:
            export_cmd.append(self.pseudo_path)
        # Runs the nfs export create command
        self._cmd(*export_cmd)
        # Check if user id for export is created
        self._check_auth_ls(export_id, check_in=True)
        res = self._sys_cmd(['rados', '-p', 'nfs-ganesha', '-N', self.cluster_id, 'get',
                             f'export-{export_id}', '-'])
        # Check if export object is created
        if res == b'':
            self.fail("Export cannot be created")
|
2020-05-28 10:57:25 +00:00
|
|
|
|
|
|
|
    def _create_default_export(self):
        '''
        Deploy a single nfs cluster and create export with default options.
        '''
        self._test_create_cluster()
        self._create_export(export_id='1', create_fs=True)
|
2020-05-28 10:57:25 +00:00
|
|
|
|
|
|
|
    def _delete_export(self):
        '''
        Delete an export.
        '''
        self._nfs_cmd('export', 'delete', self.cluster_id, self.pseudo_path)
        # Verify the export's auth user was removed along with the export
        self._check_auth_ls()
|
2020-05-28 10:57:25 +00:00
|
|
|
|
2020-06-15 19:09:58 +00:00
|
|
|
def _test_list_export(self):
|
2020-06-19 17:36:12 +00:00
|
|
|
'''
|
|
|
|
Test listing of created exports.
|
|
|
|
'''
|
2020-06-15 19:09:58 +00:00
|
|
|
nfs_output = json.loads(self._nfs_cmd('export', 'ls', self.cluster_id))
|
|
|
|
self.assertIn(self.pseudo_path, nfs_output)
|
|
|
|
|
2020-06-23 18:25:46 +00:00
|
|
|
    def _test_list_detailed(self, sub_vol_path):
        '''
        Test listing of created exports with detailed option.
        :param sub_vol_path: Denotes path of subvolume
        '''
        nfs_output = json.loads(self._nfs_cmd('export', 'ls', self.cluster_id, '--detailed'))
        # Export-1 with default values (access type = rw and path = '\')
        self.assertDictEqual(self.sample_export, nfs_output[0])
        # Export-2 with r only. sample_export is mutated in place to match each
        # successive export, so the order of these updates is significant.
        self.sample_export['export_id'] = 2
        self.sample_export['pseudo'] = self.pseudo_path + '1'
        self.sample_export['access_type'] = 'RO'
        self.sample_export['fsal']['user_id'] = self.cluster_id + '2'
        self.assertDictEqual(self.sample_export, nfs_output[1])
        # Export-3 for subvolume with r only
        self.sample_export['export_id'] = 3
        self.sample_export['path'] = sub_vol_path
        self.sample_export['pseudo'] = self.pseudo_path + '2'
        self.sample_export['fsal']['user_id'] = self.cluster_id + '3'
        self.assertDictEqual(self.sample_export, nfs_output[2])
        # Export-4 for subvolume
        self.sample_export['export_id'] = 4
        self.sample_export['pseudo'] = self.pseudo_path + '3'
        self.sample_export['access_type'] = 'RW'
        self.sample_export['fsal']['user_id'] = self.cluster_id + '4'
        self.assertDictEqual(self.sample_export, nfs_output[3])
|
|
|
|
|
|
|
|
def _test_get_export(self):
|
|
|
|
'''
|
|
|
|
Test fetching of created export.
|
|
|
|
'''
|
|
|
|
nfs_output = json.loads(self._nfs_cmd('export', 'get', self.cluster_id, self.pseudo_path))
|
|
|
|
self.assertDictEqual(self.sample_export, nfs_output)
|
|
|
|
|
2020-05-28 10:57:25 +00:00
|
|
|
def _check_export_obj_deleted(self, conf_obj=False):
|
2020-06-19 17:36:12 +00:00
|
|
|
'''
|
|
|
|
Test if export or config object are deleted successfully.
|
|
|
|
:param conf_obj: It denotes config object needs to be checked
|
|
|
|
'''
|
2020-05-28 10:57:25 +00:00
|
|
|
rados_obj_ls = self._sys_cmd(['rados', '-p', 'nfs-ganesha', '-N', self.cluster_id, 'ls'])
|
|
|
|
|
|
|
|
if b'export-' in rados_obj_ls or (conf_obj and b'conf-nfs' in rados_obj_ls):
|
2020-06-19 17:36:12 +00:00
|
|
|
self.fail("Delete export failed")
|
2020-05-28 10:57:25 +00:00
|
|
|
|
2020-08-17 08:37:06 +00:00
|
|
|
def _get_port_ip_info(self):
|
|
|
|
'''
|
|
|
|
Return port and ip for a cluster
|
|
|
|
'''
|
|
|
|
#{'test': [{'hostname': 'smithi068', 'ip': ['172.21.15.68'], 'port': 2049}]}
|
|
|
|
info_output = json.loads(self._nfs_cmd('cluster', 'info', self.cluster_id))['test'][0]
|
|
|
|
return info_output["port"], info_output["ip"][0]
|
|
|
|
|
|
|
|
    def _test_mnt(self, pseudo_path, port, ip, check=True):
        '''
        Test mounting of created exports
        :param pseudo_path: It is the pseudo root name
        :param port: Port of deployed nfs cluster
        :param ip: IP of deployed nfs cluster
        :param check: It denotes if i/o testing needs to be done
        '''
        try:
            self.ctx.cluster.run(args=['sudo', 'mount', '-t', 'nfs', '-o', f'port={port}',
                                       f'{ip}:{pseudo_path}', '/mnt'])
        except CommandFailedError as e:
            # Check if mount failed only when non existing pseudo path is passed
            # (exit status 32 is mount(8)'s "mount failure" code)
            if not check and e.exitstatus == 32:
                return
            raise

        if check:
            # Basic i/o check: create a file through the mount and list it back
            self.ctx.cluster.run(args=['sudo', 'touch', '/mnt/test'])
            out_mnt = self._sys_cmd(['sudo', 'ls', '/mnt'])
            self.assertEqual(out_mnt, b'test\n')
            self.ctx.cluster.run(args=['sudo', 'umount', '/mnt'])
|
|
|
|
|
2020-05-28 10:57:25 +00:00
|
|
|
    def test_create_and_delete_cluster(self):
        '''
        Test successful creation and deletion of the nfs cluster.
        '''
        self._test_create_cluster()
        self._test_list_cluster()
        self._test_delete_cluster()
        # List clusters again to ensure no cluster is shown
        self._test_list_cluster(empty=True)
|
2020-05-28 10:57:25 +00:00
|
|
|
|
2020-06-02 11:34:19 +00:00
|
|
|
    def test_create_delete_cluster_idempotency(self):
        '''
        Test idempotency of cluster create and delete commands.
        '''
        # Each helper performs and verifies the operation; the raw command is
        # then re-run and must still succeed.
        self._test_idempotency(self._test_create_cluster, ['nfs', 'cluster', 'create', self.export_type,
                                                           self.cluster_id])
        self._test_idempotency(self._test_delete_cluster, ['nfs', 'cluster', 'delete', self.cluster_id])
|
|
|
|
|
2020-06-16 08:45:22 +00:00
|
|
|
def test_create_cluster_with_invalid_cluster_id(self):
|
2020-06-19 17:36:12 +00:00
|
|
|
'''
|
|
|
|
Test nfs cluster deployment failure with invalid cluster id.
|
|
|
|
'''
|
2020-06-16 08:45:22 +00:00
|
|
|
try:
|
2020-06-19 17:36:12 +00:00
|
|
|
invalid_cluster_id = '/cluster_test' # Only [A-Za-z0-9-_.] chars are valid
|
2020-06-16 08:45:22 +00:00
|
|
|
self._nfs_cmd('cluster', 'create', self.export_type, invalid_cluster_id)
|
|
|
|
self.fail(f"Cluster successfully created with invalid cluster id {invalid_cluster_id}")
|
|
|
|
except CommandFailedError as e:
|
|
|
|
# Command should fail for test to pass
|
|
|
|
if e.exitstatus != errno.EINVAL:
|
|
|
|
raise
|
|
|
|
|
|
|
|
def test_create_cluster_with_invalid_export_type(self):
|
2020-06-19 17:36:12 +00:00
|
|
|
'''
|
|
|
|
Test nfs cluster deployment failure with invalid export type.
|
|
|
|
'''
|
2020-06-16 08:45:22 +00:00
|
|
|
try:
|
|
|
|
invalid_export_type = 'rgw' # Only cephfs is valid
|
|
|
|
self._nfs_cmd('cluster', 'create', invalid_export_type, self.cluster_id)
|
|
|
|
self.fail(f"Cluster successfully created with invalid export type {invalid_export_type}")
|
|
|
|
except CommandFailedError as e:
|
|
|
|
# Command should fail for test to pass
|
|
|
|
if e.exitstatus != errno.EINVAL:
|
|
|
|
raise
|
|
|
|
|
2020-06-23 18:25:46 +00:00
|
|
|
    def test_create_and_delete_export(self):
        '''
        Test successful creation and deletion of the cephfs export.
        '''
        self._create_default_export()
        self._test_get_export()
        port, ip = self._get_port_ip_info()
        # Export must be mountable and writable while it exists
        self._test_mnt(self.pseudo_path, port, ip)
        self._delete_export()
        # Check if rados export object is deleted
        self._check_export_obj_deleted()
        # Mounting must now fail, since the export is gone
        self._test_mnt(self.pseudo_path, port, ip, False)
        self._test_delete_cluster()
|
|
|
|
|
2020-06-02 11:34:19 +00:00
|
|
|
    def test_create_delete_export_idempotency(self):
        '''
        Test idempotency of export create and delete commands.
        '''
        self._test_idempotency(self._create_default_export, ['nfs', 'export', 'create', 'cephfs',
                                                             self.fs_name, self.cluster_id,
                                                             self.pseudo_path])
        self._test_idempotency(self._delete_export, ['nfs', 'export', 'delete', self.cluster_id,
                                                     self.pseudo_path])
        self._test_delete_cluster()
|
2020-06-02 11:34:19 +00:00
|
|
|
|
2020-05-29 18:06:34 +00:00
|
|
|
    def test_create_multiple_exports(self):
        '''
        Test creating multiple exports with different access type and path.
        '''
        # Export-1 with default values (access type = rw and path = '\')
        self._create_default_export()
        # Export-2 with r only
        self._create_export(export_id='2', extra_cmd=[self.pseudo_path+'1', '--readonly'])
        # Export-3 for subvolume with r only
        self._cmd('fs', 'subvolume', 'create', self.fs_name, 'sub_vol')
        fs_path = self._cmd('fs', 'subvolume', 'getpath', self.fs_name, 'sub_vol').strip()
        self._create_export(export_id='3', extra_cmd=[self.pseudo_path+'2', '--readonly', fs_path])
        # Export-4 for subvolume
        self._create_export(export_id='4', extra_cmd=[self.pseudo_path+'3', fs_path])
        # Check if exports gets listed
        self._test_list_detailed(fs_path)
        self._test_delete_cluster()
        # Check if rados ganesha conf object is deleted
        self._check_export_obj_deleted(conf_obj=True)
        self._check_auth_ls()
|
2020-06-09 07:44:36 +00:00
|
|
|
|
|
|
|
    def test_exports_on_mgr_restart(self):
        '''
        Test export availability on restarting mgr.
        '''
        self._create_default_export()
        # unload and load module will restart the mgr
        self._unload_module("cephadm")
        self._load_module("cephadm")
        self._orch_cmd("set", "backend", "cephadm")
        # Check if ganesha daemon is running
        self._check_nfs_cluster_status('running', 'Failed to redeploy NFS Ganesha cluster')
        # Checks if created export is listed
        self._test_list_export()
        port, ip = self._get_port_ip_info()
        # The export must still be mountable after the mgr restart
        self._test_mnt(self.pseudo_path, port, ip)
        self._delete_export()
        self._test_delete_cluster()
|
2020-06-16 10:04:36 +00:00
|
|
|
|
|
|
|
    def test_export_create_with_non_existing_fsname(self):
        '''
        Test creating export with non-existing filesystem.
        '''
        try:
            fs_name = 'nfs-test'
            self._test_create_cluster()
            self._nfs_cmd('export', 'create', 'cephfs', fs_name, self.cluster_id, self.pseudo_path)
            self.fail(f"Export created with non-existing filesystem {fs_name}")
        except CommandFailedError as e:
            # Command should fail for test to pass
            if e.exitstatus != errno.ENOENT:
                raise
        finally:
            # The cluster is cleaned up whether or not the export attempt failed
            self._test_delete_cluster()
|
|
|
|
|
|
|
|
def test_export_create_with_non_existing_clusterid(self):
|
2020-06-19 17:36:12 +00:00
|
|
|
'''
|
|
|
|
Test creating cephfs export with non-existing nfs cluster.
|
|
|
|
'''
|
2020-06-16 10:04:36 +00:00
|
|
|
try:
|
|
|
|
cluster_id = 'invalidtest'
|
|
|
|
self._nfs_cmd('export', 'create', 'cephfs', self.fs_name, cluster_id, self.pseudo_path)
|
|
|
|
self.fail(f"Export created with non-existing cluster id {cluster_id}")
|
|
|
|
except CommandFailedError as e:
|
|
|
|
# Command should fail for test to pass
|
|
|
|
if e.exitstatus != errno.ENOENT:
|
|
|
|
raise
|
2020-06-30 10:55:20 +00:00
|
|
|
|
2020-07-21 09:59:35 +00:00
|
|
|
    def test_export_create_with_relative_pseudo_path_and_root_directory(self):
        '''
        Test creating cephfs export with relative or '/' pseudo path.
        '''
        def check_pseudo_path(pseudo_path):
            # Export creation must be rejected with EINVAL for the given path
            try:
                self._nfs_cmd('export', 'create', 'cephfs', self.fs_name, self.cluster_id,
                              pseudo_path)
                self.fail(f"Export created for {pseudo_path}")
            except CommandFailedError as e:
                # Command should fail for test to pass
                if e.exitstatus != errno.EINVAL:
                    raise

        self._test_create_cluster()
        self._cmd('fs', 'volume', 'create', self.fs_name)
        check_pseudo_path('invalidpath')
        check_pseudo_path('/')
        check_pseudo_path('//')
        self._cmd('fs', 'volume', 'rm', self.fs_name, '--yes-i-really-mean-it')
        self._test_delete_cluster()
|
|
|
|
|
2020-06-30 10:55:20 +00:00
|
|
|
    def test_cluster_info(self):
        '''
        Test cluster info outputs correct ip and hostname
        '''
        self._test_create_cluster()
        info_output = json.loads(self._nfs_cmd('cluster', 'info', self.cluster_id))
        # Pop the ip list before the dict comparison; it is validated
        # separately below against the host's addresses.
        info_ip = info_output[self.cluster_id][0].pop("ip")
        host_details = {self.cluster_id: [{
            "hostname": self._sys_cmd(['hostname']).decode("utf-8").strip(),
            "port": 2049
        }]}
        host_ip = self._sys_cmd(['hostname', '-I']).decode("utf-8").split()
        self.assertDictEqual(info_output, host_details)
        # At least one of the host's ips must appear in the reported ip list
        self.assertTrue(any([ip in info_ip for ip in host_ip]))
        self._test_delete_cluster()
|
2020-07-31 10:27:28 +00:00
|
|
|
|
|
|
|
def test_cluster_set_reset_user_config(self):
|
|
|
|
'''
|
|
|
|
Test cluster is created using user config and reverts back to default
|
|
|
|
config on reset.
|
|
|
|
'''
|
|
|
|
self._test_create_cluster()
|
|
|
|
|
|
|
|
pool = 'nfs-ganesha'
|
|
|
|
user_id = 'test'
|
|
|
|
fs_name = 'user_test_fs'
|
2020-08-17 08:37:06 +00:00
|
|
|
pseudo_path = '/ceph'
|
2020-07-31 10:27:28 +00:00
|
|
|
self._cmd('fs', 'volume', 'create', fs_name)
|
|
|
|
time.sleep(20)
|
|
|
|
key = self._cmd('auth', 'get-or-create-key', f'client.{user_id}', 'mon',
|
|
|
|
'allow r', 'osd',
|
|
|
|
f'allow rw pool={pool} namespace={self.cluster_id}, allow rw tag cephfs data={fs_name}',
|
|
|
|
'mds', f'allow rw path={self.path}').strip()
|
|
|
|
config = f""" LOG {{
|
|
|
|
Default_log_level = FULL_DEBUG;
|
|
|
|
}}
|
|
|
|
|
|
|
|
EXPORT {{
|
|
|
|
Export_Id = 100;
|
|
|
|
Transports = TCP;
|
|
|
|
Path = /;
|
2020-08-17 08:37:06 +00:00
|
|
|
Pseudo = {pseudo_path};
|
2020-07-31 10:27:28 +00:00
|
|
|
Protocols = 4;
|
|
|
|
Access_Type = RW;
|
|
|
|
Attr_Expiration_Time = 0;
|
|
|
|
Squash = None;
|
|
|
|
FSAL {{
|
|
|
|
Name = CEPH;
|
|
|
|
Filesystem = {fs_name};
|
|
|
|
User_Id = {user_id};
|
|
|
|
Secret_Access_Key = '{key}';
|
|
|
|
}}
|
|
|
|
}}"""
|
2020-08-17 08:37:06 +00:00
|
|
|
port, ip = self._get_port_ip_info()
|
2020-07-31 10:27:28 +00:00
|
|
|
self.ctx.cluster.run(args=['sudo', 'ceph', 'nfs', 'cluster', 'config',
|
|
|
|
'set', self.cluster_id, '-i', '-'], stdin=config)
|
|
|
|
time.sleep(30)
|
|
|
|
res = self._sys_cmd(['rados', '-p', pool, '-N', self.cluster_id, 'get',
|
|
|
|
f'userconf-nfs.ganesha-{user_id}', '-'])
|
|
|
|
self.assertEqual(config, res.decode('utf-8'))
|
2020-08-17 08:37:06 +00:00
|
|
|
self._test_mnt(pseudo_path, port, ip)
|
2020-07-31 10:27:28 +00:00
|
|
|
self._nfs_cmd('cluster', 'config', 'reset', self.cluster_id)
|
|
|
|
rados_obj_ls = self._sys_cmd(['rados', '-p', 'nfs-ganesha', '-N', self.cluster_id, 'ls'])
|
|
|
|
if b'conf-nfs' not in rados_obj_ls and b'userconf-nfs' in rados_obj_ls:
|
|
|
|
self.fail("User config not deleted")
|
|
|
|
time.sleep(30)
|
2020-08-17 08:37:06 +00:00
|
|
|
self._test_mnt(pseudo_path, port, ip, False)
|
2020-07-31 10:27:28 +00:00
|
|
|
self._cmd('fs', 'volume', 'rm', fs_name, '--yes-i-really-mean-it')
|
|
|
|
self._test_delete_cluster()
|
|
|
|
|
|
|
|
def test_cluster_set_user_config_with_non_existing_clusterid(self):
|
|
|
|
'''
|
|
|
|
Test setting user config for non-existing nfs cluster.
|
|
|
|
'''
|
|
|
|
try:
|
|
|
|
cluster_id = 'invalidtest'
|
|
|
|
self.ctx.cluster.run(args=['sudo', 'ceph', 'nfs', 'cluster',
|
|
|
|
'config', 'set', self.cluster_id, '-i', '-'], stdin='testing')
|
|
|
|
self.fail(f"User config set for non-existing cluster {cluster_id}")
|
|
|
|
except CommandFailedError as e:
|
|
|
|
# Command should fail for test to pass
|
|
|
|
if e.exitstatus != errno.ENOENT:
|
|
|
|
raise
|
|
|
|
|
|
|
|
def test_cluster_reset_user_config_with_non_existing_clusterid(self):
|
|
|
|
'''
|
|
|
|
Test resetting user config for non-existing nfs cluster.
|
|
|
|
'''
|
|
|
|
try:
|
|
|
|
cluster_id = 'invalidtest'
|
|
|
|
self._nfs_cmd('cluster', 'config', 'reset', cluster_id)
|
|
|
|
self.fail(f"User config reset for non-existing cluster {cluster_id}")
|
|
|
|
except CommandFailedError as e:
|
|
|
|
# Command should fail for test to pass
|
|
|
|
if e.exitstatus != errno.ENOENT:
|
|
|
|
raise
|