diff --git a/.github/actions/install-lxd/action.yaml b/.github/actions/install-lxd/action.yaml index a24800774..12c96019a 100644 --- a/.github/actions/install-lxd/action.yaml +++ b/.github/actions/install-lxd/action.yaml @@ -32,5 +32,16 @@ runs: - name: Apply Docker iptables workaround shell: bash run: | - sudo iptables -I DOCKER-USER -i lxdbr0 -j ACCEPT - sudo iptables -I DOCKER-USER -o lxdbr0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + set -x + ip a + ip r + + bridges=('lxdbr0' 'dualstack-br0' 'ipv6-br0') + for i in "${bridges[@]}"; do + set +e + sudo iptables -I DOCKER-USER -i "$i" -j ACCEPT + sudo ip6tables -I DOCKER-USER -i "$i" -j ACCEPT + sudo iptables -I DOCKER-USER -o "$i" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + sudo ip6tables -I DOCKER-USER -o "$i" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + set -e + done diff --git a/tests/integration/tests/test_loadbalancer.py b/tests/integration/tests/test_loadbalancer.py index 7182fce6d..386e1487b 100644 --- a/tests/integration/tests/test_loadbalancer.py +++ b/tests/integration/tests/test_loadbalancer.py @@ -2,6 +2,7 @@ # Copyright 2025 Canonical, Ltd. 
# import logging +from enum import Enum from pathlib import Path from typing import List @@ -12,25 +13,84 @@ LOG = logging.getLogger(__name__) +class K8sNetType(Enum): + ipv4 = "ipv4" + ipv6 = "ipv6" + dualstack = "dualstack" + + @pytest.mark.node_count(2) @pytest.mark.tags(tags.PULL_REQUEST) -def test_loadbalancer(instances: List[harness.Instance]): - instance = instances[0] +@pytest.mark.disable_k8s_bootstrapping() +def test_loadbalancer_ipv4(instances: List[harness.Instance]): + _test_loadbalancer(instances, k8s_net_type=K8sNetType.ipv4) + + +@pytest.mark.node_count(2) +@pytest.mark.tags(tags.PULL_REQUEST) +@pytest.mark.disable_k8s_bootstrapping() +def test_loadbalancer_ipv6_only(instances: List[harness.Instance]): + pytest.xfail( + "Cilium ipv6 only unsupported: https://github.com/cilium/cilium/issues/15082" + ) + _test_loadbalancer(instances, k8s_net_type=K8sNetType.ipv6) + + +@pytest.mark.node_count(2) +@pytest.mark.tags(tags.PULL_REQUEST) +@pytest.mark.disable_k8s_bootstrapping() +@pytest.mark.dualstack() +@pytest.mark.network_type("dualstack") +def test_loadbalancer_ipv6_dualstack(instances: List[harness.Instance]): + _test_loadbalancer(instances, k8s_net_type=K8sNetType.dualstack) + +def _test_loadbalancer(instances: List[harness.Instance], k8s_net_type: K8sNetType): + instance = instances[0] tester_instance = instances[1] - instance_default_ip = util.get_default_ip(instance) - tester_instance_default_ip = util.get_default_ip(tester_instance) + if k8s_net_type == K8sNetType.ipv6: + bootstrap_config = (MANIFESTS_DIR / "bootstrap-ipv6-only.yaml").read_text() + instance.exec( + ["k8s", "bootstrap", "--file", "-", "--address", "::/0"], + input=str.encode(bootstrap_config), + ) + elif k8s_net_type == K8sNetType.dualstack: + bootstrap_config = (MANIFESTS_DIR / "bootstrap-dualstack.yaml").read_text() + instance.exec( + ["k8s", "bootstrap", "--file", "-"], + input=str.encode(bootstrap_config), + ) + else: + instance.exec(["k8s", "bootstrap"]) - 
instance_default_cidr = util.get_default_cidr(instance, instance_default_ip) + lb_cidrs = [] - lb_cidr = util.find_suitable_cidr( - parent_cidr=instance_default_cidr, - excluded_ips=[instance_default_ip, tester_instance_default_ip], - ) + def get_lb_cidr(ipv6_cidr: bool): + instance_default_ip = util.get_default_ip(instance, ipv6=ipv6_cidr) + tester_instance_default_ip = util.get_default_ip( + tester_instance, ipv6=ipv6_cidr + ) + instance_default_cidr = util.get_default_cidr(instance, instance_default_ip) + lb_cidr = util.find_suitable_cidr( + parent_cidr=instance_default_cidr, + excluded_ips=[instance_default_ip, tester_instance_default_ip], + ) + return lb_cidr + + if k8s_net_type in (K8sNetType.ipv4, K8sNetType.dualstack): + lb_cidrs.append(get_lb_cidr(ipv6_cidr=False)) + if k8s_net_type in (K8sNetType.ipv6, K8sNetType.dualstack): + lb_cidrs.append(get_lb_cidr(ipv6_cidr=True)) + lb_cidr_str = ",".join(lb_cidrs) instance.exec( - ["k8s", "set", f"load-balancer.cidrs={lb_cidr}", "load-balancer.l2-mode=true"] + [ + "k8s", + "set", + f"load-balancer.cidrs={lb_cidr_str}", + "load-balancer.l2-mode=true", + ] ) instance.exec(["k8s", "enable", "load-balancer"]) diff --git a/tests/integration/tests/test_strict_interfaces.py b/tests/integration/tests/test_strict_interfaces.py index fb722f67f..101e75503 100644 --- a/tests/integration/tests/test_strict_interfaces.py +++ b/tests/integration/tests/test_strict_interfaces.py @@ -17,6 +17,8 @@ ) @pytest.mark.tags(tags.WEEKLY) def test_strict_interfaces(instances: List[harness.Instance], tmp_path): + pytest.xfail("Strict channel tests are currently skipped.") + channels = config.STRICT_INTERFACE_CHANNELS cp = instances[0] current_channel = channels[0] diff --git a/tests/integration/tests/test_util/util.py b/tests/integration/tests/test_util/util.py index 3c98c91ad..52e6e8a95 100644 --- a/tests/integration/tests/test_util/util.py +++ b/tests/integration/tests/test_util/util.py @@ -357,26 +357,40 @@ def join_cluster(instance: 
harness.Instance, join_token: str): instance.exec(["k8s", "join-cluster", join_token]) +def is_ipv6(ip: str) -> bool: + addr = ipaddress.ip_address(ip) + return isinstance(addr, ipaddress.IPv6Address) + + def get_default_cidr(instance: harness.Instance, instance_default_ip: str): # ---- # 1: lo inet 127.0.0.1/8 scope host lo ..... # 28: eth0 inet 10.42.254.197/24 metric 100 brd 10.42.254.255 scope global dynamic eth0 .... # ---- # Fetching the cidr for the default interface by matching with instance ip from the output - p = instance.exec(["ip", "-o", "-f", "inet", "addr", "show"], capture_output=True) + addr_family = "-6" if is_ipv6(instance_default_ip) else "-4" + p = instance.exec(["ip", "-o", addr_family, "addr", "show"], capture_output=True) out = p.stdout.decode().split(" ") return [i for i in out if instance_default_ip in i][0] -def get_default_ip(instance: harness.Instance): +def get_default_ip(instance: harness.Instance, ipv6=False): # --- # default via 10.42.254.1 dev eth0 proto dhcp src 10.42.254.197 metric 100 # --- # Fetching the default IP address from the output, e.g. 
10.42.254.197 - p = instance.exec( - ["ip", "-o", "-4", "route", "show", "to", "default"], capture_output=True - ) - return p.stdout.decode().split(" ")[8] + if ipv6: + p = instance.exec( + ["ip", "-json", "-6", "addr", "show", "scope", "global"], + capture_output=True, + ) + addr_json = json.loads(p.stdout.decode()) + return addr_json[0]["addr_info"][0]["local"] + else: + p = instance.exec( + ["ip", "-o", "-4", "route", "show", "to", "default"], capture_output=True + ) + return p.stdout.decode().split(" ")[8] def get_global_unicast_ipv6(instance: harness.Instance, interface="eth0") -> str | None: @@ -519,14 +533,20 @@ def previous_track(snap_version: str) -> str: def find_suitable_cidr(parent_cidr: str, excluded_ips: List[str]): """Find a suitable CIDR for LoadBalancer services""" - net = ipaddress.IPv4Network(parent_cidr, False) + net = ipaddress.ip_network(parent_cidr, False) + ipv6 = isinstance(net, ipaddress.IPv6Network) + if ipv6: + ip_range = 126 + else: + ip_range = 30 # Starting from the first IP address from the parent cidr, # we search for a /30 cidr block(4 total ips, 2 available) # that doesn't contain the excluded ips to avoid collisions - # /30 because this is the smallest CIDR cilium hands out IPs from + # /30 because this is the smallest CIDR cilium hands out IPs from. + # For ipv6, we use a /126 block that contains 4 total ips. for i in range(4, 255, 4): - lb_net = ipaddress.IPv4Network(f"{str(net[0]+i)}/30", False) + lb_net = ipaddress.ip_network(f"{str(net[0]+i)}/{ip_range}", False) contains_excluded = False for excluded in excluded_ips: