Merge pull request #52410 from phlogistonjohn/jjm-issue61852-haproxy-ips

cephadm: fix issue with limited haproxy_hosts IPs list

Reviewed-by: Adam King <adking@redhat.com>
Committed by: Adam King, 2023-08-16 10:16:09 -04:00 (via GitHub)
Commit: c53f119348
2 changed files with 56 additions and 13 deletions

@@ -1,4 +1,5 @@
import errno
import ipaddress
import logging
import os
import subprocess
@@ -116,18 +117,7 @@ class NFSService(CephService):
"haproxy_hosts": [],
}
if spec.enable_haproxy_protocol:
# NB: Ideally, we would limit the list to IPs on hosts running
# haproxy/ingress only, but due to the nature of cephadm today
# we'd "only know the set of haproxy hosts after they've been
# deployed" (quoth @adk7398). As it is today we limit the list
# of hosts we know are managed by cephadm. That ought to be
# good enough to prevent acceping haproxy protocol messages
# from "rouge" systems that are not under our control. At
# least until we learn otherwise.
context["haproxy_hosts"] = [
self.mgr.inventory.get_addr(h)
for h in self.mgr.inventory.keys()
]
context["haproxy_hosts"] = self._haproxy_hosts()
logger.debug("selected haproxy_hosts: %r", context["haproxy_hosts"])
return self.mgr.template.render('services/nfs/ganesha.conf.j2', context)
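
For context, the haproxy_hosts context value surfaces in the rendered Ganesha config as a single comma-separated HAProxy_Hosts directive (see the expected output in the test further down). The fragment below is a minimal stand-in sketch of that rendering step, not the real services/nfs/ganesha.conf.j2 template; only the join of the list into one directive is assumed.

# Minimal sketch, assuming only that the template joins the haproxy_hosts
# context list into one comma-separated directive (matching the expected
# unit-test output below); this fragment is hypothetical, not the real
# ganesha.conf.j2.
from jinja2 import Template

fragment = Template(
    "{% if haproxy_hosts %}"
    "HAProxy_Hosts = {{ haproxy_hosts|join(', ') }};"
    "{% endif %}"
)
print(fragment.render(
    haproxy_hosts=['192.168.122.111', '10.10.2.20', '192.168.122.222']
))
# -> HAProxy_Hosts = 192.168.122.111, 10.10.2.20, 192.168.122.222;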
@@ -311,3 +301,31 @@ class NFSService(CephService):
stderr=subprocess.PIPE,
timeout=10
)
def _haproxy_hosts(self) -> List[str]:
# NB: Ideally, we would limit the list to IPs on hosts running
# haproxy/ingress only, but due to the nature of cephadm today
# we'd "only know the set of haproxy hosts after they've been
# deployed" (quoth @adk7398). As it is today we limit the list
# to hosts we know are managed by cephadm. That ought to be
# good enough to prevent accepting haproxy protocol messages
# from "rogue" systems that are not under our control. At
# least until we learn otherwise.
cluster_ips: List[str] = []
for host in self.mgr.inventory.keys():
default_addr = self.mgr.inventory.get_addr(host)
cluster_ips.append(default_addr)
nets = self.mgr.cache.networks.get(host)
if not nets:
continue
for subnet, iface in nets.items():
ip_subnet = ipaddress.ip_network(subnet)
if ipaddress.ip_address(default_addr) in ip_subnet:
continue # already present
if ip_subnet.is_loopback or ip_subnet.is_link_local:
continue # ignore special subnets
addrs: List[str] = sum((addr_list for addr_list in iface.values()), [])
if addrs:
# one address per interface/subnet is enough
cluster_ips.append(addrs[0])
return cluster_ips
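
Outside of the mgr/inventory machinery, the same filtering can be illustrated with nothing but the standard ipaddress module. The helper below (collect_cluster_ips) and its networks argument are made up for illustration and mirror the unit-test data in the next file; it is a sketch of the idea, not cephadm code.

import ipaddress
from typing import Dict, List


def collect_cluster_ips(
    default_addr: str, networks: Dict[str, Dict[str, List[str]]]
) -> List[str]:
    # Hypothetical stand-alone version of the filtering above: start with the
    # host's default address, then add one extra address per subnet, skipping
    # loopback/link-local subnets and the subnet that already contains the
    # default address.
    ips: List[str] = [default_addr]
    for subnet, ifaces in networks.items():
        net = ipaddress.ip_network(subnet)
        if ipaddress.ip_address(default_addr) in net:
            continue  # already covered by the default address
        if net.is_loopback or net.is_link_local:
            continue  # never advertise special-purpose subnets
        addrs = [a for addr_list in ifaces.values() for a in addr_list]
        if addrs:
            ips.append(addrs[0])  # one address per interface/subnet is enough
    return ips


# Mirrors the test data below: the extra 10.10.2.0/24 address is kept, while
# the redundant 192.168.122.0/24 address and the link-local fe80::/64 block
# are dropped.
print(collect_cluster_ips('192.168.122.111', {
    '10.10.2.0/24': {'eth1': ['10.10.2.20']},
    '192.168.122.0/24': {'eth0': ['192.168.122.111']},
    'fe80::/64': {'eth0': ['fe80::c7b:cbff:fef6:7370']},
}))
# -> ['192.168.122.111', '10.10.2.20']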

@@ -2228,7 +2228,7 @@ class TestIngressService:
' Enable_RQUOTA = false;\n'
' Protocols = 4;\n'
' NFS_Port = 2049;\n'
' HAProxy_Hosts = 192.168.122.111, 192.168.122.222;\n'
' HAProxy_Hosts = 192.168.122.111, 10.10.2.20, 192.168.122.222;\n'
'}\n'
'\n'
'NFSv4 {\n'
@@ -2301,6 +2301,31 @@ class TestIngressService:
ingress_svc = cephadm_module.cephadm_services['ingress']
nfs_svc = cephadm_module.cephadm_services['nfs']
# add host network info to one host to test the behavior of
# adding all known-good addresses of the host to the list.
cephadm_module.cache.update_host_networks('host1', {
# this one is additional
'10.10.2.0/24': {
'eth1': ['10.10.2.20']
},
# this is redundant and will be skipped
'192.168.122.0/24': {
'eth0': ['192.168.122.111']
},
# this is a link-local address and will be ignored
"fe80::/64": {
"veth0": [
"fe80::8cf5:25ff:fe1c:d963"
],
"eth0": [
"fe80::c7b:cbff:fef6:7370"
],
"eth1": [
"fe80::7201:25a7:390b:d9a7"
]
},
})
haproxy_generated_conf, _ = ingress_svc.haproxy_generate_config(
CephadmDaemonDeploySpec(
host='host1',