mgr/nfs: add --port to 'nfs cluster create' and port to 'nfs cluster info'
Fixes: https://tracker.ceph.com/issues/51787
Signed-off-by: Sage Weil <sage@newdream.net>
commit 8ebe341198 (parent a5e8227a83)
@@ -29,7 +29,7 @@ Create NFS Ganesha Cluster
 
 .. code:: bash
 
-    $ ceph nfs cluster create <cluster_id> [<placement>] [--ingress --virtual-ip <ip>]
+    $ ceph nfs cluster create <cluster_id> [<placement>] [--port <port>] [--ingress --virtual-ip <ip>]
 
 This creates a common recovery pool for all NFS Ganesha daemons, new user based on
 ``cluster_id``, and a common NFS Ganesha config RADOS object.
@@ -58,6 +58,8 @@ cluster)::
 
     "2 host1,host2"
 
+NFS can be deployed on a port other than 2049 (the default) with ``--port <port>``.
+
 To deploy NFS with a high-availability front-end (virtual IP and load balancer), add the
 ``--ingress`` flag and specify a virtual IP address. This will deploy a combination
 of keepalived and haproxy to provide an high-availability NFS frontend for the NFS
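As a rough illustration (not part of the patch; the cluster name, placement, and addresses below are made up), the documented options correspond to the keyword arguments that the mgr/nfs CLI handler forwards to NFSCluster.create_nfs_cluster in the Python hunks below:

    # Hypothetical values only; the parameter names match the diff below.
    opts = {
        'cluster_id': 'mynfs',
        'placement': '2 host1,host2',   # optional orchestrator placement
        'port': 3049,                   # --port: serve NFS on a non-default port
        'ingress': True,                # --ingress
        'virtual_ip': '10.0.0.10/24',   # --virtual-ip
    }
    # Omitting --port leaves port=None, which the cluster code below defaults to 2049.
    print(opts)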
@@ -14,7 +14,7 @@ tasks:
 - cephadm.shell:
     host.a:
       - ceph fs volume create foofs
-      - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}}
+      - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}} --port 2999
       - ceph nfs export create cephfs foofs foo --pseudo-path /fake
 
 - cephadm.wait_for_service:
@@ -28,7 +28,7 @@ tasks:
     host.a:
       - mkdir /mnt/foo
       - sleep 5
-      - mount -t nfs {{VIP0}}:/fake /mnt/foo
+      - mount -t nfs {{VIP0}}:/fake /mnt/foo -o port=2999
       - echo test > /mnt/foo/testfile
       - sync
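The mount now passes -o port=2999 because, with ingress enabled, the haproxy frontend at the virtual IP listens on the requested --port instead of the NFS default 2049. A minimal sketch of the reachability check this implies, assuming a hypothetical virtual IP and run outside the teuthology framework:

    import socket

    def nfs_port_open(vip: str, port: int = 2999, timeout: float = 5.0) -> bool:
        """Return True if a TCP connection to the ingress frontend succeeds."""
        try:
            with socket.create_connection((vip, port), timeout=timeout):
                return True
        except OSError:
            return False

    if __name__ == '__main__':
        print(nfs_port_open('10.0.0.10'))  # hypothetical VIP standing in for {{VIP0}}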
@@ -53,28 +53,37 @@ class NFSCluster:
     def _get_user_conf_obj_name(self, cluster_id: str) -> str:
         return f'userconf-nfs.{cluster_id}'
 
-    def _call_orch_apply_nfs(self, cluster_id: str, placement: Optional[str], virtual_ip: Optional[str] = None) -> None:
+    def _call_orch_apply_nfs(
+            self,
+            cluster_id: str,
+            placement: Optional[str],
+            virtual_ip: Optional[str] = None,
+            port: Optional[int] = None,
+    ) -> None:
+        if not port:
+            port = 2049   # default nfs port
         if virtual_ip:
             # nfs + ingress
             # run NFS on non-standard port
             spec = NFSServiceSpec(service_type='nfs', service_id=cluster_id,
                                   placement=PlacementSpec.from_string(placement),
                                   # use non-default port so we don't conflict with ingress
-                                  port=12049)
+                                  port=10000 + port)   # semi-arbitrary, fix me someday
             completion = self.mgr.apply_nfs(spec)
             orchestrator.raise_if_exception(completion)
             ispec = IngressSpec(service_type='ingress',
                                 service_id='nfs.' + cluster_id,
                                 backend_service='nfs.' + cluster_id,
-                                frontend_port=2049,   # default nfs port
-                                monitor_port=9049,
+                                frontend_port=port,
+                                monitor_port=7000 + port,   # semi-arbitrary, fix me someday
                                 virtual_ip=virtual_ip)
             completion = self.mgr.apply_ingress(ispec)
             orchestrator.raise_if_exception(completion)
         else:
             # standalone nfs
             spec = NFSServiceSpec(service_type='nfs', service_id=cluster_id,
-                                  placement=PlacementSpec.from_string(placement))
+                                  placement=PlacementSpec.from_string(placement),
+                                  port=port)
             completion = self.mgr.apply_nfs(spec)
             orchestrator.raise_if_exception(completion)
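To summarize the port layout this hunk establishes: clients always connect on the requested port (2049 if --port is not given); with ingress, the backing NFS daemons are shifted to 10000 + port so haproxy can own the client-facing port, and the haproxy monitor endpoint sits at 7000 + port. A small standalone sketch of that arithmetic (illustrative only, not part of the patch):

    from typing import Optional, Tuple

    def nfs_port_layout(port: Optional[int], ingress: bool) -> Tuple[int, int, Optional[int]]:
        """Return (client_facing_port, nfs_daemon_port, haproxy_monitor_port)."""
        if not port:
            port = 2049                             # default NFS port
        if ingress:
            return port, 10000 + port, 7000 + port  # frontend, backend, monitor
        return port, port, None                     # standalone: daemons listen directly

    assert nfs_port_layout(None, ingress=False) == (2049, 2049, None)
    assert nfs_port_layout(2999, ingress=True) == (2999, 12999, 9999)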
@@ -88,11 +97,14 @@ class NFSCluster:
         log.info(f"Deleted {self._get_common_conf_obj_name(cluster_id)} object and all objects in "
                  f"{cluster_id}")
 
-    def create_nfs_cluster(self,
-                           cluster_id: str,
-                           placement: Optional[str],
-                           virtual_ip: Optional[str],
-                           ingress: Optional[bool] = None) -> Tuple[int, str, str]:
+    def create_nfs_cluster(
+            self,
+            cluster_id: str,
+            placement: Optional[str],
+            virtual_ip: Optional[str],
+            ingress: Optional[bool] = None,
+            port: Optional[int] = None,
+    ) -> Tuple[int, str, str]:
         try:
             if virtual_ip and not ingress:
                 raise NFSInvalidOperation('virtual_ip can only be provided with ingress enabled')
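For clarity, the argument handling around the new keyword boils down to: virtual_ip still requires ingress, and port stays optional with 2049 substituted later in _call_orch_apply_nfs. A tiny illustrative sketch (the exception class here is only a local stand-in for the module's NFSInvalidOperation):

    from typing import Optional

    class NFSInvalidOperation(Exception):
        """Local stand-in for mgr/nfs's exception of the same name."""

    def normalize_create_args(virtual_ip: Optional[str],
                              ingress: Optional[bool],
                              port: Optional[int]) -> int:
        if virtual_ip and not ingress:
            raise NFSInvalidOperation('virtual_ip can only be provided with ingress enabled')
        return port or 2049   # --port is optional; fall back to the NFS default

    assert normalize_create_args(None, None, None) == 2049
    assert normalize_create_args('10.0.0.10/24', True, 2999) == 2999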
@@ -108,7 +120,7 @@ class NFSCluster:
             self.create_empty_rados_obj(cluster_id)
 
             if cluster_id not in available_clusters(self.mgr):
-                self._call_orch_apply_nfs(cluster_id, placement, virtual_ip)
+                self._call_orch_apply_nfs(cluster_id, placement, virtual_ip, port)
                 return 0, "NFS Cluster Created Successfully", ""
             return 0, "", f"{cluster_id} cluster already exists"
         except Exception as e:
@@ -93,10 +93,12 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
                                 cluster_id: str,
                                 placement: Optional[str] = None,
                                 ingress: Optional[bool] = None,
-                                virtual_ip: Optional[str] = None) -> Tuple[int, str, str]:
+                                virtual_ip: Optional[str] = None,
+                                port: Optional[int] = None) -> Tuple[int, str, str]:
         """Create an NFS Cluster"""
         return self.nfs.create_nfs_cluster(cluster_id=cluster_id, placement=placement,
-                                           virtual_ip=virtual_ip, ingress=ingress)
+                                           virtual_ip=virtual_ip, ingress=ingress,
+                                           port=port)
 
     @CLICommand('nfs cluster rm', perm='rw')
     def _cmd_nfs_cluster_rm(self, cluster_id: str) -> Tuple[int, str, str]: