Merge PR #42691 into master

* refs/pull/42691/head:
	mgr/nfs: add --port to 'nfs cluster create' and port to 'nfs cluster info'
	qa/suites/orch/cephadm/smoke-roleless: test taking ganeshas offline
	qa/tasks/vip: exec with bash -ex
	qa/suites/orch/cephadm: separate test_nfs from test_orch_cli

Reviewed-by: Varsha Rao <varao@redhat.com>
commit 3331a0a7ea
Author: Sage Weil
Date: 2021-08-10 16:37:38 -04:00

7 changed files with 70 additions and 17 deletions


@@ -29,7 +29,7 @@ Create NFS Ganesha Cluster

 .. code:: bash

-    $ ceph nfs cluster create <cluster_id> [<placement>] [--ingress --virtual-ip <ip>]
+    $ ceph nfs cluster create <cluster_id> [<placement>] [--port <port>] [--ingress --virtual-ip <ip>]

 This creates a common recovery pool for all NFS Ganesha daemons, a new user based on
 ``cluster_id``, and a common NFS Ganesha config RADOS object.

@@ -58,6 +58,8 @@ cluster)::

     "2 host1,host2"

+NFS can be deployed on a port other than 2049 (the default) with ``--port <port>``.
+
 To deploy NFS with a high-availability front-end (virtual IP and load balancer), add the
 ``--ingress`` flag and specify a virtual IP address. This will deploy a combination
 of keepalived and haproxy to provide a high-availability NFS frontend for the NFS


@@ -14,7 +14,7 @@ tasks:

 - cephadm.shell:
     host.a:
       - ceph fs volume create foofs
-      - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}}
+      - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}} --port 2999
       - ceph nfs export create cephfs foofs foo --pseudo-path /fake

 - cephadm.wait_for_service:
@@ -28,7 +28,7 @@ tasks:

     host.a:
       - mkdir /mnt/foo
       - sleep 5
-      - mount -t nfs {{VIP0}}:/fake /mnt/foo
+      - mount -t nfs {{VIP0}}:/fake /mnt/foo -o port=2999
       - echo test > /mnt/foo/testfile
       - sync
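
Because the ingress frontend now listens on 2999 instead of the NFS default 2049, the client mount has to name the port explicitly, as the changed line above does. Outside the test harness the equivalent mount would be (the IP and pseudo-path are placeholders):

    sudo mount -t nfs -o port=2999 10.0.0.1:/fake /mnt/foo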
@@ -48,3 +48,23 @@ tasks:

       ceph orch daemon start $haproxy
       while ! ceph orch ps | grep $haproxy | grep running; do sleep 1 ; done
       done
+
+# take each ganesha down in turn.
+# simulate "failure" by deleting the container
+- vip.exec:
+    all-hosts:
+      - |
+        echo "Check with $(hostname) ganesha(s) down..."
+        for c in `systemctl | grep ceph- | grep @nfs | awk '{print $1}'`; do
+          cid=`echo $c | sed 's/@/-/'`
+          id=`echo $c | cut -d @ -f 2 | sed 's/.service$//'`
+          fsid=`echo $c | cut -d @ -f 1 | cut -d - -f 2-`
+          echo "Removing daemon $id fsid $fsid..."
+          sudo $TESTDIR/cephadm rm-daemon --fsid $fsid --name $id
+
+          echo "Waking up cephadm..."
+          sudo $TESTDIR/cephadm shell -- ceph orch ps --refresh
+          while ! timeout 1 cat /mnt/foo/testfile ; do true ; done
+          echo "Mount is back!"
+        done
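
The parsing in the loop above leans on cephadm's systemd unit naming, ceph-<fsid>@<daemon-name>.service. A sketch with a made-up unit name, showing what each pipeline extracts:

    c=ceph-11111111-2222-3333-4444-555555555555@nfs.foo.host1.service
    echo $c | cut -d @ -f 2 | sed 's/.service$//'  # daemon name: nfs.foo.host1
    echo $c | cut -d @ -f 1 | cut -d - -f 2-       # fsid: 11111111-2222-3333-4444-555555555555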


@@ -0,0 +1,17 @@
+roles:
+- - host.a
+  - osd.0
+  - osd.1
+  - osd.2
+  - mon.a
+  - mgr.a
+  - client.0
+tasks:
+- install:
+- cephadm:
+- cephadm.shell:
+    host.a:
+      - ceph orch apply mds a
+- cephfs_test_runner:
+    modules:
+      - tasks.cephfs.test_nfs


@@ -14,5 +14,4 @@ tasks:

       - ceph orch apply mds a
 - cephfs_test_runner:
     modules:
-      - tasks.cephfs.test_nfs
       - tasks.cephadm_cases.test_cli


@@ -62,6 +62,7 @@ def exec(ctx, config):

             'sudo',
             'TESTDIR={tdir}'.format(tdir=testdir),
             'bash',
+            '-ex',
             '-c',
             subst_vip(ctx, c)],
         )
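
The added -ex matters for debuggability: -x echoes each command before it runs, so the test logs show exactly what executed, and -e aborts the snippet on the first failing command instead of plowing on. The effect is the same as running, say:

    bash -ex -c 'echo one; false; echo never reached'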


@@ -53,28 +53,37 @@ class NFSCluster:

     def _get_user_conf_obj_name(self, cluster_id: str) -> str:
         return f'userconf-nfs.{cluster_id}'

-    def _call_orch_apply_nfs(self, cluster_id: str, placement: Optional[str], virtual_ip: Optional[str] = None) -> None:
+    def _call_orch_apply_nfs(
+            self,
+            cluster_id: str,
+            placement: Optional[str],
+            virtual_ip: Optional[str] = None,
+            port: Optional[int] = None,
+    ) -> None:
+        if not port:
+            port = 2049  # default nfs port
+
         if virtual_ip:
             # nfs + ingress
             # run NFS on non-standard port
             spec = NFSServiceSpec(service_type='nfs', service_id=cluster_id,
                                   placement=PlacementSpec.from_string(placement),
                                   # use non-default port so we don't conflict with ingress
-                                  port=12049)
+                                  port=10000 + port)  # semi-arbitrary, fix me someday
             completion = self.mgr.apply_nfs(spec)
             orchestrator.raise_if_exception(completion)

             ispec = IngressSpec(service_type='ingress',
                                 service_id='nfs.' + cluster_id,
                                 backend_service='nfs.' + cluster_id,
-                                frontend_port=2049,  # default nfs port
-                                monitor_port=9049,
+                                frontend_port=port,
+                                monitor_port=7000 + port,  # semi-arbitrary, fix me someday
                                 virtual_ip=virtual_ip)
             completion = self.mgr.apply_ingress(ispec)
             orchestrator.raise_if_exception(completion)
         else:
             # standalone nfs
             spec = NFSServiceSpec(service_type='nfs', service_id=cluster_id,
-                                  placement=PlacementSpec.from_string(placement))
+                                  placement=PlacementSpec.from_string(placement),
+                                  port=port)
             completion = self.mgr.apply_nfs(spec)
             orchestrator.raise_if_exception(completion)
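
Under the scheme above, one --port value fans out to three related ports when ingress is enabled. For a hypothetical cluster created with --port 2999:

    # haproxy frontend, what clients connect to (frontend_port):  2999
    # backend ganesha daemons (10000 + port):                     12999
    # haproxy monitor (7000 + port):                              9999
    ceph nfs cluster create foo "2 host1,host2" --ingress --virtual-ip 10.0.0.1/24 --port 2999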
@@ -88,11 +97,14 @@ class NFSCluster:

         log.info(f"Deleted {self._get_common_conf_obj_name(cluster_id)} object and all objects in "
                  f"{cluster_id}")

-    def create_nfs_cluster(self,
-                           cluster_id: str,
-                           placement: Optional[str],
-                           virtual_ip: Optional[str],
-                           ingress: Optional[bool] = None) -> Tuple[int, str, str]:
+    def create_nfs_cluster(
+            self,
+            cluster_id: str,
+            placement: Optional[str],
+            virtual_ip: Optional[str],
+            ingress: Optional[bool] = None,
+            port: Optional[int] = None,
+    ) -> Tuple[int, str, str]:
         try:
             if virtual_ip and not ingress:
                 raise NFSInvalidOperation('virtual_ip can only be provided with ingress enabled')

@@ -108,7 +120,7 @@ class NFSCluster:

             self.create_empty_rados_obj(cluster_id)

             if cluster_id not in available_clusters(self.mgr):
-                self._call_orch_apply_nfs(cluster_id, placement, virtual_ip)
+                self._call_orch_apply_nfs(cluster_id, placement, virtual_ip, port)
                 return 0, "NFS Cluster Created Successfully", ""
             return 0, "", f"{cluster_id} cluster already exists"
         except Exception as e:


@@ -93,10 +93,12 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):

                              cluster_id: str,
                              placement: Optional[str] = None,
                              ingress: Optional[bool] = None,
-                             virtual_ip: Optional[str] = None) -> Tuple[int, str, str]:
+                             virtual_ip: Optional[str] = None,
+                             port: Optional[int] = None) -> Tuple[int, str, str]:
         """Create an NFS Cluster"""
         return self.nfs.create_nfs_cluster(cluster_id=cluster_id, placement=placement,
-                                           virtual_ip=virtual_ip, ingress=ingress)
+                                           virtual_ip=virtual_ip, ingress=ingress,
+                                           port=port)

     @CLICommand('nfs cluster rm', perm='rw')
     def _cmd_nfs_cluster_rm(self, cluster_id: str) -> Tuple[int, str, str]: