From 7373f0ce7a62cdcca17a48f2b03a2e66b03d4f6c Mon Sep 17 00:00:00 2001 From: Michal Dulko Date: Tue, 30 Jan 2024 02:54:55 +0100 Subject: [PATCH 01/23] Use k3s with containerd (#2529) Seems like in 1.29 there are issues with cri-dockerd. There's no reason to use Docker anymore and this commit makes sure k3s uses containerd. --- .../install-docker-registry/tasks/main.yml | 12 ++++++++++-- .../templates/ca-config.json.j2 | 6 ++++-- .../roles/install-k3s/tasks/main.yaml | 18 +++++++----------- tests/playbooks/test-csi-cinder-e2e.yaml | 4 ++-- tests/playbooks/test-csi-manila-e2e.yaml | 4 ++-- tests/playbooks/test-occm-e2e.yaml | 4 ++-- 6 files changed, 27 insertions(+), 21 deletions(-) diff --git a/tests/playbooks/roles/install-docker-registry/tasks/main.yml b/tests/playbooks/roles/install-docker-registry/tasks/main.yml index 7ef3e67319..9c1dfe92aa 100644 --- a/tests/playbooks/roles/install-docker-registry/tasks/main.yml +++ b/tests/playbooks/roles/install-docker-registry/tasks/main.yml @@ -33,6 +33,14 @@ cmd: | cfssl gencert -initca ca-csr.json | cfssljson -bare ca - +- name: Create server certificate + shell: + executable: /bin/bash + chdir: "{{ ansible_user_dir }}/certs" + creates: "{{ ansible_user_dir }}/certs/server.pem" + cmd: | + cfssl gencert -config ca-config.json -profile server -ca ./ca.pem -ca-key ./ca-key.pem ca-csr.json | cfssljson -bare server + - name: Run docker registry container shell: executable: /bin/bash @@ -44,8 +52,8 @@ --name registry \ -v "{{ ansible_user_dir }}/certs":/certs \ -e REGISTRY_HTTP_ADDR=0.0.0.0:443 \ - -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/ca.pem \ - -e REGISTRY_HTTP_TLS_KEY=/certs/ca-key.pem \ + -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/server.pem \ + -e REGISTRY_HTTP_TLS_KEY=/certs/server-key.pem \ -p 443:443 \ registry:2 fi diff --git a/tests/playbooks/roles/install-docker-registry/templates/ca-config.json.j2 b/tests/playbooks/roles/install-docker-registry/templates/ca-config.json.j2 index 5710c586f4..08e3f11b90 100644 --- a/tests/playbooks/roles/install-docker-registry/templates/ca-config.json.j2 +++ b/tests/playbooks/roles/install-docker-registry/templates/ca-config.json.j2 @@ -9,7 +9,8 @@ "usages": [ "signing", "key encipherment", - "server auth" + "server auth", + "digital signature" ] }, "client": { @@ -17,7 +18,8 @@ "usages": [ "signing", "key encipherment", - "client auth" + "client auth", + "digital signature" ] } } diff --git a/tests/playbooks/roles/install-k3s/tasks/main.yaml b/tests/playbooks/roles/install-k3s/tasks/main.yaml index cd9acb7675..ab8f8a4c97 100644 --- a/tests/playbooks/roles/install-k3s/tasks/main.yaml +++ b/tests/playbooks/roles/install-k3s/tasks/main.yaml @@ -88,19 +88,17 @@ manage_etc_hosts: "localhost" package_update: true runcmd: - - curl -sSL https://get.docker.com/ | sh + - update-ca-certificates - mkdir -p /var/lib/rancher/k3s/agent/images/ - curl -sSL https://github.com/k3s-io/k3s/releases/download/{{ k3s_release }}/k3s-airgap-images-amd64.tar -o /var/lib/rancher/k3s/agent/images/k3s-airgap-images.tar - curl -sSL https://github.com/k3s-io/k3s/releases/download/{{ k3s_release }}/k3s -o /usr/local/bin/k3s - curl -sSL https://get.k3s.io -o /var/lib/rancher/k3s/install.sh - chmod u+x /var/lib/rancher/k3s/install.sh /usr/local/bin/k3s - - INSTALL_K3S_SKIP_DOWNLOAD=true /var/lib/rancher/k3s/install.sh --docker --disable traefik --disable metrics-server --disable servicelb --disable-cloud-controller --kubelet-arg="cloud-provider=external" --tls-san {{ k3s_fip }} --token {{ cluster_token }} + - INSTALL_K3S_SKIP_DOWNLOAD=true 
/var/lib/rancher/k3s/install.sh --disable traefik --disable metrics-server --disable servicelb --disable-cloud-controller --kubelet-arg="cloud-provider=external" --tls-san {{ k3s_fip }} --token {{ cluster_token }} write_files: - - path: /etc/docker/daemon.json + - path: /usr/local/share/ca-certificates/registry-ca.crt content: | - { - "insecure-registries" : ["{{ ansible_default_ipv4.address }}"] - } + $(awk '{printf " %s\n", $0}' < /root/certs/ca.pem) EOF # Create k3s master @@ -126,7 +124,7 @@ manage_etc_hosts: "localhost" package_update: true runcmd: - - curl -sSL https://get.docker.com/ | sh + - update-ca-certificates - mkdir -p /var/lib/rancher/k3s/agent/images/ - curl -sSL https://github.com/k3s-io/k3s/releases/download/{{ release.stdout }}/k3s-airgap-images-amd64.tar -o /var/lib/rancher/k3s/agent/images/k3s-airgap-images.tar - curl -sSL https://github.com/k3s-io/k3s/releases/download/{{ release.stdout }}/k3s -o /usr/local/bin/k3s @@ -134,11 +132,9 @@ - chmod u+x /var/lib/rancher/k3s/install.sh /usr/local/bin/k3s - INSTALL_K3S_SKIP_DOWNLOAD=true K3S_URL=https://{{ k3s_fip }}:6443 K3S_TOKEN={{ cluster_token }} /var/lib/rancher/k3s/install.sh --docker --kubelet-arg="cloud-provider=external" write_files: - - path: /etc/docker/daemon.json + - path: /usr/local/share/ca-certificates/registry-ca.crt content: | - { - "insecure-registries" : ["{{ ansible_default_ipv4.address }}"] - } + $(awk '{printf " %s\n", $0}' < /root/certs/ca.pem) EOF # Create k3s worker diff --git a/tests/playbooks/test-csi-cinder-e2e.yaml b/tests/playbooks/test-csi-cinder-e2e.yaml index 52fec9d184..ac9a7a401a 100644 --- a/tests/playbooks/test-csi-cinder-e2e.yaml +++ b/tests/playbooks/test-csi-cinder-e2e.yaml @@ -17,11 +17,11 @@ - neutron - glance - cinder - - role: install-k3s - worker_node_count: 0 - role: install-docker - role: install-docker-registry cert_hosts: ' ["{{ ansible_default_ipv4.address }}"]' + - role: install-k3s + worker_node_count: 0 - role: install-cpo-occm run_e2e: false environment: "{{ global_env }}" diff --git a/tests/playbooks/test-csi-manila-e2e.yaml b/tests/playbooks/test-csi-manila-e2e.yaml index 38b4c904a1..468e9d78f3 100644 --- a/tests/playbooks/test-csi-manila-e2e.yaml +++ b/tests/playbooks/test-csi-manila-e2e.yaml @@ -16,11 +16,11 @@ - neutron - glance - manila - - role: install-k3s - worker_node_count: 0 - role: install-docker - role: install-docker-registry cert_hosts: ' ["{{ ansible_default_ipv4.address }}"]' + - role: install-k3s + worker_node_count: 0 - role: install-cpo-occm run_e2e: false - role: install-helm diff --git a/tests/playbooks/test-occm-e2e.yaml b/tests/playbooks/test-occm-e2e.yaml index b52904770c..54494218e2 100644 --- a/tests/playbooks/test-occm-e2e.yaml +++ b/tests/playbooks/test-occm-e2e.yaml @@ -20,11 +20,11 @@ - octavia - ovn-octavia - barbican - - role: install-k3s - worker_node_count: 0 - role: install-docker - role: install-docker-registry cert_hosts: ' ["{{ ansible_default_ipv4.address }}"]' + - role: install-k3s + worker_node_count: 0 - role: install-cpo-occm run_e2e: "{{ run_e2e }}" octavia_provider: "{{ octavia_provider }}" From 119a3ea8abc76f0c2f397e3439040aa57d4b1a3a Mon Sep 17 00:00:00 2001 From: Michal Dulko Date: Tue, 30 Jan 2024 08:59:34 +0100 Subject: [PATCH 02/23] CI: Fetch DevStack and k3s logs (#2527) This commit makes sure we're fetching and putting the logs of the DevStack services as well as k3s into the artifacts for every CI job. 
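The DevStack side of the collection is a loop over systemd units; a minimal sketch of the pattern the new fetch-logs role below implements (unit filter and log directory as in the role):

```bash
# Sketch of the log-collection loop added in the fetch-logs role below.
# DevStack runs each service as a systemd unit named devstack@<service>.
units=$(systemctl list-units --type service | awk '{ print $1 }' | grep 'devstack@')
for unit in $units; do
    name=${unit#devstack@}   # e.g. devstack@c-api.service -> c-api.service
    name=${name%.service}    # -> c-api
    sudo journalctl -u "$unit" --no-pager > "/root/logs/${name}.log"
done
```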
--- tests/ci-csi-cinder-e2e.sh | 12 +++++++ tests/ci-csi-manila-e2e.sh | 12 +++++++ tests/ci-occm-e2e.sh | 15 +++++--- tests/playbooks/fetch-logs.yaml | 11 ++++++ tests/playbooks/roles/fetch-logs/README.md | 1 + .../roles/fetch-logs/defaults/main.yaml | 2 ++ .../roles/fetch-logs/tasks/main.yaml | 34 +++++++++++++++++++ 7 files changed, 83 insertions(+), 4 deletions(-) create mode 100644 tests/playbooks/fetch-logs.yaml create mode 100644 tests/playbooks/roles/fetch-logs/README.md create mode 100644 tests/playbooks/roles/fetch-logs/defaults/main.yaml create mode 100644 tests/playbooks/roles/fetch-logs/tasks/main.yaml diff --git a/tests/ci-csi-cinder-e2e.sh b/tests/ci-csi-cinder-e2e.sh index c3b6469c58..2c5bbfe33d 100755 --- a/tests/ci-csi-cinder-e2e.sh +++ b/tests/ci-csi-cinder-e2e.sh @@ -110,6 +110,18 @@ ansible-playbook -v \ tests/playbooks/test-csi-cinder-e2e.yaml exit_code=$? +# Fetch logs for debugging purpose +ansible-playbook -v \ + --user ${USERNAME} \ + --private-key ~/.ssh/google_compute_engine \ + --inventory ${PUBLIC_IP}, \ + --ssh-common-args "-o StrictHostKeyChecking=no" \ + tests/playbooks/fetch-logs.yaml + + scp -i ~/.ssh/google_compute_engine \ + -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \ + -r ${USERNAME}@${PUBLIC_IP}:~/logs $ARTIFACTS/logs/devstack || true + # Fetch cinder-csi tests logs for debugging purpose scp -i ~/.ssh/google_compute_engine \ -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \ diff --git a/tests/ci-csi-manila-e2e.sh b/tests/ci-csi-manila-e2e.sh index 63bf7647ba..7b285320ea 100755 --- a/tests/ci-csi-manila-e2e.sh +++ b/tests/ci-csi-manila-e2e.sh @@ -110,6 +110,18 @@ ansible-playbook -v \ tests/playbooks/test-csi-manila-e2e.yaml exit_code=$? +# Fetch logs for debugging purpose +ansible-playbook -v \ + --user ${USERNAME} \ + --private-key ~/.ssh/google_compute_engine \ + --inventory ${PUBLIC_IP}, \ + --ssh-common-args "-o StrictHostKeyChecking=no" \ + tests/playbooks/fetch-logs.yaml + + scp -i ~/.ssh/google_compute_engine \ + -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \ + -r ${USERNAME}@${PUBLIC_IP}:~/logs $ARTIFACTS/logs/devstack || true + # Fetch manila-csi tests results scp -i ~/.ssh/google_compute_engine \ -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \ diff --git a/tests/ci-occm-e2e.sh b/tests/ci-occm-e2e.sh index 71a6a9f85c..4e60b2d363 100755 --- a/tests/ci-occm-e2e.sh +++ b/tests/ci-occm-e2e.sh @@ -113,10 +113,17 @@ ansible-playbook -v \ -e run_e2e=true exit_code=$? 
-# Fetch devstack logs for debugging purpose -# scp -i ~/.ssh/google_compute_engine \ -# -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \ -# -r ${USERNAME}@${PUBLIC_IP}:/opt/stack/logs $ARTIFACTS/logs/devstack || true +# Fetch logs for debugging purpose +ansible-playbook -v \ + --user ${USERNAME} \ + --private-key ~/.ssh/google_compute_engine \ + --inventory ${PUBLIC_IP}, \ + --ssh-common-args "-o StrictHostKeyChecking=no" \ + tests/playbooks/fetch-logs.yaml + + scp -i ~/.ssh/google_compute_engine \ + -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \ + -r ${USERNAME}@${PUBLIC_IP}:~/logs $ARTIFACTS/logs/devstack || true # Fetch octavia amphora image build logs for debugging purpose scp -i ~/.ssh/google_compute_engine \ diff --git a/tests/playbooks/fetch-logs.yaml b/tests/playbooks/fetch-logs.yaml new file mode 100644 index 0000000000..41ac53b92c --- /dev/null +++ b/tests/playbooks/fetch-logs.yaml @@ -0,0 +1,11 @@ +- hosts: all + become: true + become_method: sudo + gather_facts: true + + vars: + user: stack + devstack_workdir: /home/{{ user }}/devstack + + roles: + - role: fetch-logs diff --git a/tests/playbooks/roles/fetch-logs/README.md b/tests/playbooks/roles/fetch-logs/README.md new file mode 100644 index 0000000000..c3b52d203e --- /dev/null +++ b/tests/playbooks/roles/fetch-logs/README.md @@ -0,0 +1 @@ +The ansible role gets logs of various services running in the CI for further analysis diff --git a/tests/playbooks/roles/fetch-logs/defaults/main.yaml b/tests/playbooks/roles/fetch-logs/defaults/main.yaml new file mode 100644 index 0000000000..cef6d784b2 --- /dev/null +++ b/tests/playbooks/roles/fetch-logs/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +master_port_name: "k3s_master" diff --git a/tests/playbooks/roles/fetch-logs/tasks/main.yaml b/tests/playbooks/roles/fetch-logs/tasks/main.yaml new file mode 100644 index 0000000000..7cf66c76c5 --- /dev/null +++ b/tests/playbooks/roles/fetch-logs/tasks/main.yaml @@ -0,0 +1,34 @@ +- name: Get k3s master floating IP + shell: + executable: /bin/bash + cmd: | + set +x; source {{ devstack_workdir }}/openrc demo demo > /dev/null + openstack floating ip list --port {{ master_port_name }} -c "Floating IP Address" -f value + register: fip + +- name: Set fact for k3s master floating IP + set_fact: + k3s_fip: "{{ fip.stdout }}" + +- name: Creates directory + ansible.builtin.file: + path: "/root/logs" + state: directory + +- name: Fetch k3s logs + shell: + executable: /bin/bash + cmd: | + ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {{ ansible_user_dir }}/.ssh/id_rsa ubuntu@{{ k3s_fip }} sudo journalctl -u k3s.service --no-pager > /root/logs/k3s.log + +- name: Fetch DevStack logs + shell: + executable: /bin/bash + cmd: | + set +x; + units=`systemctl list-units --type service | awk '{ print $1 }' | grep devstack\@` + for unit in $units; do + filename=${unit#"devstack@"} + filename=${filename%".service"} + sudo journalctl -u $unit --no-pager > /root/logs/${filename}.log + done; From 73d26619f77898b97607e90efa3a6109d8f5f4b6 Mon Sep 17 00:00:00 2001 From: Michal Dulko Date: Wed, 31 Jan 2024 07:36:32 +0100 Subject: [PATCH 03/23] Ingress: Fix listener timeout updates (#2518) PR #2468 used an incorrect condition when updating the listener timeout values and only updates them when listener's allowedCIDRs are updated. This commit fixes this by making sure timeout values are checked too. 
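The corrected per-field check reduces to a single rule: an update is needed when a nil request would clear a non-zero timeout on the listener, or when a non-nil request differs from the listener's current value. A sketch of that rule as a standalone helper (the helper name is illustrative; the diff below inlines this logic for each of the four timeout fields):

```go
// timeoutNeedsUpdate reports whether a requested listener timeout differs
// from the current value. A nil request means no timeout was configured,
// which the controller expects to correspond to a zero value on the listener.
// Illustrative helper only; the patch below inlines this check per field.
func timeoutNeedsUpdate(requested *int, current int) bool {
	if requested == nil {
		return current != 0
	}
	return *requested != current
}
```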
---
 pkg/ingress/controller/openstack/octavia.go | 27 ++++++++++++++-------
 1 file changed, 18 insertions(+), 9 deletions(-)

diff --git a/pkg/ingress/controller/openstack/octavia.go b/pkg/ingress/controller/openstack/octavia.go
index 1705cc36cb..7924595c07 100644
--- a/pkg/ingress/controller/openstack/octavia.go
+++ b/pkg/ingress/controller/openstack/octavia.go
@@ -365,19 +365,28 @@ func (os *OpenStack) EnsureListener(name string, lbID string, secretRefs []strin
 		log.WithFields(log.Fields{"lbID": lbID, "listenerName": name}).Info("listener created")
 	} else {
+		updateOpts := listeners.UpdateOpts{}
 		if len(listenerAllowedCIDRs) > 0 && !reflect.DeepEqual(listener.AllowedCIDRs, listenerAllowedCIDRs) {
-			_, err := listeners.Update(os.Octavia, listener.ID, listeners.UpdateOpts{
-				AllowedCIDRs:         &listenerAllowedCIDRs,
-				TimeoutClientData:    timeoutClientData,
-				TimeoutMemberData:    timeoutMemberData,
-				TimeoutMemberConnect: timeoutMemberConnect,
-				TimeoutTCPInspect:    timeoutTCPInspect,
-			}).Extract()
+			updateOpts.AllowedCIDRs = &listenerAllowedCIDRs
+		}
+
+		if timeoutClientData == nil && listener.TimeoutClientData != 0 || timeoutClientData != nil && *timeoutClientData != listener.TimeoutClientData ||
+			timeoutMemberData == nil && listener.TimeoutMemberData != 0 || timeoutMemberData != nil && *timeoutMemberData != listener.TimeoutMemberData ||
+			timeoutMemberConnect == nil && listener.TimeoutMemberConnect != 0 || timeoutMemberConnect != nil && *timeoutMemberConnect != listener.TimeoutMemberConnect ||
+			timeoutTCPInspect == nil && listener.TimeoutTCPInspect != 0 || timeoutTCPInspect != nil && *timeoutTCPInspect != listener.TimeoutTCPInspect {
+			updateOpts.TimeoutClientData = timeoutClientData
+			updateOpts.TimeoutMemberData = timeoutMemberData
+			updateOpts.TimeoutMemberConnect = timeoutMemberConnect
+			updateOpts.TimeoutTCPInspect = timeoutTCPInspect
+		}
+
+		if updateOpts != (listeners.UpdateOpts{}) {
+			_, err := listeners.Update(os.Octavia, listener.ID, updateOpts).Extract()
 			if err != nil {
-				return nil, fmt.Errorf("failed to update listener allowed CIDRs: %v", err)
+				return nil, fmt.Errorf("failed to update listener options: %v", err)
 			}

-			log.WithFields(log.Fields{"listenerID": listener.ID}).Debug("listener allowed CIDRs updated")
+			log.WithFields(log.Fields{"listenerID": listener.ID}).Debug("listener options updated")
 		}
 	}

From 7f1daa8b0edacabda1a3bd16f87377a5e767e241 Mon Sep 17 00:00:00 2001
From: Tobias Wolf
Date: Fri, 2 Feb 2024 17:08:50 +0100
Subject: [PATCH 04/23] Add support to only run selected CSI services (#2316)

* Add support to only run selected CSI services of the cinder CSI driver

* Add support to only run selected CSI services of the manila CSI driver

* Clean up source files to successfully complete linting

* Update description of the `nodeid` command line parameter

* Update documentation for the CSI service parameters

This commit updates the documentation for the parameters that control
whether the CSI controller and node services are provided.
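For the cinder plugin this replaces the former all-in-one SetupDriver call with two guarded setup calls. A condensed sketch of the resulting flow, taken from the cinder-csi-plugin main below (error handling elided, not a complete program):

```go
// Condensed from cmd/cinder-csi-plugin/main.go as changed in this patch.
d := cinder.NewDriver(&cinder.DriverOpts{Endpoint: endpoint, ClusterID: cluster})

if provideControllerService {
	d.SetupControllerService(cloud) // provisioning/attach RPCs
}
if provideNodeService {
	// the node service additionally needs the mount and metadata helpers
	d.SetupNodeService(cloud, mount.GetMountProvider(),
		metadata.GetMetadataProvider(cloud.GetMetadataOpts().SearchOrder))
}

d.Run() // fatals if neither service was set up
```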
--- cmd/cinder-csi-plugin/main.go | 37 +++++++---- cmd/manila-csi-plugin/main.go | 62 ++++++++++++------- .../using-cinder-csi-plugin.md | 27 +++++--- .../using-manila-csi-plugin.md | 6 +- pkg/csi/cinder/controllerserver_test.go | 18 +----- pkg/csi/cinder/driver.go | 27 +++++--- pkg/csi/cinder/driver_test.go | 3 +- pkg/csi/cinder/identityserver_test.go | 2 +- pkg/csi/cinder/nodeserver_test.go | 7 +-- pkg/csi/cinder/utils.go | 3 +- pkg/csi/manila/driver.go | 43 ++++++++++--- tests/sanity/cinder/sanity_test.go | 6 +- tests/sanity/manila/sanity_test.go | 14 ++++- 13 files changed, 167 insertions(+), 88 deletions(-) diff --git a/cmd/cinder-csi-plugin/main.go b/cmd/cinder-csi-plugin/main.go index e59502a5cc..1673bda4e1 100644 --- a/cmd/cinder-csi-plugin/main.go +++ b/cmd/cinder-csi-plugin/main.go @@ -31,11 +31,13 @@ import ( ) var ( - endpoint string - nodeID string - cloudConfig []string - cluster string - httpEndpoint string + endpoint string + nodeID string + cloudConfig []string + cluster string + httpEndpoint string + provideControllerService bool + provideNodeService bool ) func main() { @@ -65,6 +67,10 @@ func main() { cmd.PersistentFlags().StringVar(&cluster, "cluster", "", "The identifier of the cluster that the plugin is running in.") cmd.PersistentFlags().StringVar(&httpEndpoint, "http-endpoint", "", "The TCP network address where the HTTP server for providing metrics for diagnostics, will listen (example: `:8080`). The default is empty string, which means the server is disabled.") + + cmd.PersistentFlags().BoolVar(&provideControllerService, "provide-controller-service", true, "If set to true then the CSI driver does provide the controller service (default: true)") + cmd.PersistentFlags().BoolVar(&provideNodeService, "provide-node-service", true, "If set to true then the CSI driver does provide the node service (default: true)") + openstack.AddExtraFlags(pflag.CommandLine) code := cli.Run(cmd) @@ -73,19 +79,28 @@ func main() { func handle() { // Initialize cloud - d := cinder.NewDriver(endpoint, cluster) + d := cinder.NewDriver(&cinder.DriverOpts{Endpoint: endpoint, ClusterID: cluster}) + openstack.InitOpenStackProvider(cloudConfig, httpEndpoint) cloud, err := openstack.GetOpenStackProvider() if err != nil { klog.Warningf("Failed to GetOpenStackProvider: %v", err) return } - //Initialize mount - mount := mount.GetMountProvider() - //Initialize Metadata - metadata := metadata.GetMetadataProvider(cloud.GetMetadataOpts().SearchOrder) + if provideControllerService { + d.SetupControllerService(cloud) + } + + if provideNodeService { + //Initialize mount + mount := mount.GetMountProvider() + + //Initialize Metadata + metadata := metadata.GetMetadataProvider(cloud.GetMetadataOpts().SearchOrder) + + d.SetupNodeService(cloud, mount, metadata) + } - d.SetupDriver(cloud, mount, metadata) d.Run() } diff --git a/cmd/manila-csi-plugin/main.go b/cmd/manila-csi-plugin/main.go index 1087a3326c..0c0b1895bb 100644 --- a/cmd/manila-csi-plugin/main.go +++ b/cmd/manila-csi-plugin/main.go @@ -45,9 +45,11 @@ var ( clusterID string // Runtime options - endpoint string - runtimeConfigFile string - userAgentData []string + endpoint string + runtimeConfigFile string + userAgentData []string + provideControllerService bool + provideNodeService bool ) func validateShareProtocolSelector(v string) error { @@ -75,23 +77,39 @@ func main() { manilaClientBuilder := &manilaclient.ClientBuilder{UserAgent: "manila-csi-plugin", ExtraUserAgentData: userAgentData} csiClientBuilder := &csiclient.ClientBuilder{} - d, err := 
manila.NewDriver( - &manila.DriverOpts{ - DriverName: driverName, - NodeID: nodeID, - NodeAZ: nodeAZ, - WithTopology: withTopology, - ShareProto: protoSelector, - ServerCSIEndpoint: endpoint, - FwdCSIEndpoint: fwdEndpoint, - ManilaClientBuilder: manilaClientBuilder, - CSIClientBuilder: csiClientBuilder, - ClusterID: clusterID, - }, - ) + opts := &manila.DriverOpts{ + DriverName: driverName, + WithTopology: withTopology, + ShareProto: protoSelector, + ServerCSIEndpoint: endpoint, + FwdCSIEndpoint: fwdEndpoint, + ManilaClientBuilder: manilaClientBuilder, + CSIClientBuilder: csiClientBuilder, + ClusterID: clusterID, + } + + if provideNodeService { + opts.NodeID = nodeID + opts.NodeAZ = nodeAZ + } + d, err := manila.NewDriver(opts) if err != nil { - klog.Fatalf("driver initialization failed: %v", err) + klog.Fatalf("Driver initialization failed: %v", err) + } + + if provideControllerService { + err = d.SetupControllerService() + if err != nil { + klog.Fatalf("Driver controller service initialization failed: %v", err) + } + } + + if provideNodeService { + err = d.SetupNodeService() + if err != nil { + klog.Fatalf("Driver node service initialization failed: %v", err) + } } runtimeconfig.RuntimeConfigFilename = runtimeConfigFile @@ -105,10 +123,7 @@ func main() { cmd.PersistentFlags().StringVar(&driverName, "drivername", "manila.csi.openstack.org", "name of the driver") - cmd.PersistentFlags().StringVar(&nodeID, "nodeid", "", "this node's ID") - if err := cmd.MarkPersistentFlagRequired("nodeid"); err != nil { - klog.Fatalf("Unable to mark flag nodeid to be required: %v", err) - } + cmd.PersistentFlags().StringVar(&nodeID, "nodeid", "", "this node's ID. This value is required if the node service is provided by this CSI driver instance.") cmd.PersistentFlags().StringVar(&nodeAZ, "nodeaz", "", "this node's availability zone") @@ -132,6 +147,9 @@ func main() { cmd.PersistentFlags().StringVar(&clusterID, "cluster-id", "", "The identifier of the cluster that the plugin is running in.") + cmd.PersistentFlags().BoolVar(&provideControllerService, "provide-controller-service", true, "If set to true then the CSI driver does provide the controller service (default: true)") + cmd.PersistentFlags().BoolVar(&provideNodeService, "provide-node-service", true, "If set to true then the CSI driver does provide the node service (default: true)") + code := cli.Run(cmd) os.Exit(code) } diff --git a/docs/cinder-csi-plugin/using-cinder-csi-plugin.md b/docs/cinder-csi-plugin/using-cinder-csi-plugin.md index 4bcb1eaeed..c3a3656ffd 100644 --- a/docs/cinder-csi-plugin/using-cinder-csi-plugin.md +++ b/docs/cinder-csi-plugin/using-cinder-csi-plugin.md @@ -98,6 +98,19 @@ In addition to the standard set of klog flags, `cinder-csi-plugin` accepts the f The default is empty string, which means the server is disabled. +
+<dt>--provide-controller-service &lt;enabled&gt;</dt>
+<dd>
+  If set to true then the CSI driver does provide the controller service.
+
+  The default is to provide the controller service.
+</dd>
+
+<dt>--provide-node-service &lt;enabled&gt;</dt>
+<dd>
+  If set to true then the CSI driver does provide the node service.
+
+  The default is to provide the node service.
+</dd>
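Together the two options support the common split into a controller Deployment and a per-node DaemonSet, each running the same binary with one service disabled. A hypothetical pair of invocations (socket and config paths are illustrative):

```
# controller pods: identity and controller services only
cinder-csi-plugin --cloud-config /etc/config/cloud.conf \
    --endpoint unix:///csi/csi.sock --provide-node-service=false

# node pods: identity and node services only
cinder-csi-plugin --cloud-config /etc/config/cloud.conf \
    --endpoint unix:///csi/csi.sock --provide-controller-service=false
```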
## Driver Config @@ -114,7 +127,7 @@ Implementation of `cinder-csi-plugin` relies on following OpenStack services. For Driver configuration, parameters must be passed via configuration file specified in `$CLOUD_CONFIG` environment variable. The following sections are supported in configuration file. -### Global +### Global For Cinder CSI Plugin to authenticate with OpenStack Keystone, required parameters needs to be passed in `[Global]` section of the file. For all supported parameters, please refer [Global](../openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md#global) section. ### Block Storage @@ -196,7 +209,7 @@ cinder.csi.openstack.org true true false < mountPath: /etc/cacert readOnly: true - volumes: + volumes: .... - name: cacert hostPath: @@ -254,7 +267,7 @@ helm install --namespace kube-system --name cinder-csi ./charts/cinder-csi-plugi | StorageClass `parameters` | `availability` | `nova` | String. Volume Availability Zone | | StorageClass `parameters` | `type` | Empty String | String. Name/ID of Volume type. Corresponding volume type should exist in cinder | | VolumeSnapshotClass `parameters` | `force-create` | `false` | Enable to support creating snapshot for a volume in in-use status | -| Inline Volume `volumeAttributes` | `capacity` | `1Gi` | volume size for creating inline volumes| +| Inline Volume `volumeAttributes` | `capacity` | `1Gi` | volume size for creating inline volumes| | Inline Volume `VolumeAttributes` | `type` | Empty String | Name/ID of Volume type. Corresponding volume type should exist in cinder | ## Local Development @@ -266,14 +279,14 @@ To build the plugin, run ``` $ export ARCH=amd64 # Defaults to amd64 $ make build-cmd-cinder-csi-plugin -``` +``` To build cinder-csi-plugin image ``` $ export ARCH=amd64 # Defaults to amd64 $ make build-local-image-cinder-csi-plugin -``` +``` ### Testing @@ -284,7 +297,7 @@ To run all unit tests: $ make test ``` #### Sanity Tests -Sanity tests ensures the CSI spec conformance of the driver. For more info, refer [Sanity check](https://github.com/kubernetes-csi/csi-test/tree/master/pkg/sanity) +Sanity tests ensures the CSI spec conformance of the driver. For more info, refer [Sanity check](https://github.com/kubernetes-csi/csi-test/tree/master/pkg/sanity) Run sanity tests for cinder CSI driver using: @@ -298,5 +311,5 @@ Optionally, to test the driver csc tool could be used. please refer, [usage guid Starting from Kubernetes 1.21, OpenStack Cinder CSI migration is supported as beta feature and is `ON` by default. Cinder CSI driver must be installed on clusters on OpenStack for Cinder volumes to work. If you have persistence volumes that are created with in-tree `kubernetes.io/cinder` plugin, you could migrate to use `cinder.csi.openstack.org` Container Storage Interface (CSI) Driver. -* The CSI Migration feature for Cinder, when enabled, shims all plugin operations from the existing in-tree plugin to the `cinder.csi.openstack.org` CSI Driver. +* The CSI Migration feature for Cinder, when enabled, shims all plugin operations from the existing in-tree plugin to the `cinder.csi.openstack.org` CSI Driver. 
* For more info, please refer [Migrate to CCM with CSI Migration](../openstack-cloud-controller-manager/migrate-to-ccm-with-csimigration.md#migrate-from-in-tree-cloud-provider-to-openstack-cloud-controller-manager-and-enable-csimigration) guide diff --git a/docs/manila-csi-plugin/using-manila-csi-plugin.md b/docs/manila-csi-plugin/using-manila-csi-plugin.md index bfca15c56a..f9e02636eb 100644 --- a/docs/manila-csi-plugin/using-manila-csi-plugin.md +++ b/docs/manila-csi-plugin/using-manila-csi-plugin.md @@ -40,6 +40,8 @@ Option | Default value | Description `--share-protocol-selector` | _none_ | Specifies which Manila share protocol to use for this instance of the driver. See [supported protocols](#share-protocol-support-matrix) for valid values. `--fwdendpoint` | _none_ | [CSI Node Plugin](https://github.com/container-storage-interface/spec/blob/master/spec.md#rpc-interface) endpoint to which all Node Service RPCs are forwarded. Must be able to handle the file-system specified in `share-protocol-selector`. Check out the [Deployment](#deployment) section to see why this is necessary. `--cluster-id` | _none_ | The identifier of the cluster that the plugin is running in. If set then the plugin will add "manila.csi.openstack.org/cluster: \" to metadata of created shares. +`--provide-controller-service` | `true` | If set to true then the CSI driver does provide the controller service. +`--provide-node-service` | `true` | If set to true then the CSI driver does provide the node service. ### Controller Service volume parameters @@ -56,7 +58,7 @@ Parameter | Required | Description `cephfs-kernelMountOptions` | _no_ | Relevant for CephFS Manila shares. Specifies mount options for CephFS kernel client. See [CSI CephFS docs](https://github.com/ceph/ceph-csi/blob/csi-v1.0/docs/deploy-cephfs.md#configuration) for further information. `cephfs-fuseMountOptions` | _no_ | Relevant for CephFS Manila shares. Specifies mount options for CephFS FUSE client. See [CSI CephFS docs](https://github.com/ceph/ceph-csi/blob/csi-v1.0/docs/deploy-cephfs.md#configuration) for further information. `cephfs-clientID` | _no_ | Relevant for CephFS Manila shares. Specifies the cephx client ID when creating an access rule for the provisioned share. The same cephx client ID may be shared with multiple Manila shares. If no value is provided, client ID for the provisioned Manila share will be set to some unique value (PersistentVolume name). -`nfs-shareClient` | _no_ | Relevant for NFS Manila shares. Specifies what address has access to the NFS share. Defaults to `0.0.0.0/0`, i.e. anyone. +`nfs-shareClient` | _no_ | Relevant for NFS Manila shares. Specifies what address has access to the NFS share. Defaults to `0.0.0.0/0`, i.e. anyone. ### Node Service volume context @@ -199,7 +201,7 @@ CSI Manila Helm chart is located in `charts/manila-csi-plugin`. First, modify `values.yaml` to suite your environment, and then simply install the Helm chart with `$ helm install ./charts/manila-csi-plugin`. -Note that the release name generated by `helm install` may not be suitable due to their length. The chart generates object names with the release name included in them, which may cause the names to exceed 63 characters and result in chart installation failure. You may use `--name` flag to set the release name manually. See [helm installation docs](https://helm.sh/docs/helm/#helm-install) for more info. Alternatively, you may also use `nameOverride` or `fullnameOverride` variables in `values.yaml` to override the respective names. 
+Note that the release name generated by `helm install` may not be suitable due to their length. The chart generates object names with the release name included in them, which may cause the names to exceed 63 characters and result in chart installation failure. You may use `--name` flag to set the release name manually. See [helm installation docs](https://helm.sh/docs/helm/#helm-install) for more info. Alternatively, you may also use `nameOverride` or `fullnameOverride` variables in `values.yaml` to override the respective names. **Manual deployment** diff --git a/pkg/csi/cinder/controllerserver_test.go b/pkg/csi/cinder/controllerserver_test.go index afa0b02c7b..b0a712cca2 100644 --- a/pkg/csi/cinder/controllerserver_test.go +++ b/pkg/csi/cinder/controllerserver_test.go @@ -34,7 +34,7 @@ func init() { osmock = new(openstack.OpenStackMock) openstack.OsInstance = osmock - d := NewDriver(FakeEndpoint, FakeCluster) + d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster}) fakeCs = NewControllerServer(d, openstack.OsInstance) } @@ -42,7 +42,6 @@ func init() { // Test CreateVolume func TestCreateVolume(t *testing.T) { - // mock OpenStack properties := map[string]string{"cinder.csi.openstack.org/cluster": FakeCluster} // CreateVolume(name string, size int, vtype, availability string, snapshotID string, tags *map[string]string) (string, string, int, error) @@ -89,7 +88,6 @@ func TestCreateVolume(t *testing.T) { // Test CreateVolume with additional param func TestCreateVolumeWithParam(t *testing.T) { - // mock OpenStack properties := map[string]string{"cinder.csi.openstack.org/cluster": FakeCluster} // CreateVolume(name string, size int, vtype, availability string, snapshotID string, tags *map[string]string) (string, string, int, error) @@ -141,7 +139,6 @@ func TestCreateVolumeWithParam(t *testing.T) { } func TestCreateVolumeWithExtraMetadata(t *testing.T) { - // mock OpenStack properties := map[string]string{ "cinder.csi.openstack.org/cluster": FakeCluster, @@ -188,7 +185,6 @@ func TestCreateVolumeWithExtraMetadata(t *testing.T) { } func TestCreateVolumeFromSnapshot(t *testing.T) { - properties := map[string]string{"cinder.csi.openstack.org/cluster": FakeCluster} // CreateVolume(name string, size int, vtype, availability string, snapshotID string, tags *map[string]string) (string, string, int, error) osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), FakeVolType, "", FakeSnapshotID, "", &properties).Return(&FakeVolFromSnapshot, nil) @@ -236,7 +232,6 @@ func TestCreateVolumeFromSnapshot(t *testing.T) { } func TestCreateVolumeFromSourceVolume(t *testing.T) { - properties := map[string]string{"cinder.csi.openstack.org/cluster": FakeCluster} // CreateVolume(name string, size int, vtype, availability string, snapshotID string, tags *map[string]string) (string, string, int, error) osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), FakeVolType, "", "", FakeVolID, &properties).Return(&FakeVolFromSourceVolume, nil) @@ -285,7 +280,6 @@ func TestCreateVolumeFromSourceVolume(t *testing.T) { // Test CreateVolumeDuplicate func TestCreateVolumeDuplicate(t *testing.T) { - // Init assert assert := assert.New(t) @@ -318,7 +312,6 @@ func TestCreateVolumeDuplicate(t *testing.T) { // Test DeleteVolume func TestDeleteVolume(t *testing.T) { - // DeleteVolume(volumeID string) error osmock.On("DeleteVolume", FakeVolID).Return(nil) @@ -345,7 +338,6 @@ func TestDeleteVolume(t *testing.T) { // Test ControllerPublishVolume func TestControllerPublishVolume(t *testing.T) { - 
// AttachVolume(instanceID, volumeID string) (string, error) osmock.On("AttachVolume", FakeNodeID, FakeVolID).Return(FakeVolID, nil) // WaitDiskAttached(instanceID string, volumeID string) error @@ -387,7 +379,6 @@ func TestControllerPublishVolume(t *testing.T) { // Test ControllerUnpublishVolume func TestControllerUnpublishVolume(t *testing.T) { - // DetachVolume(instanceID, volumeID string) error osmock.On("DetachVolume", FakeNodeID, FakeVolID).Return(nil) // WaitDiskDetached(instanceID string, volumeID string) error @@ -416,7 +407,6 @@ func TestControllerUnpublishVolume(t *testing.T) { } func TestListVolumes(t *testing.T) { - osmock.On("ListVolumes", 2, FakeVolID).Return(FakeVolListMultiple, "", nil) // Init assert @@ -461,7 +451,6 @@ func TestListVolumes(t *testing.T) { // Test CreateSnapshot func TestCreateSnapshot(t *testing.T) { - osmock.On("CreateSnapshot", FakeSnapshotName, FakeVolID, &map[string]string{cinderCSIClusterIDKey: "cluster"}).Return(&FakeSnapshotRes, nil) osmock.On("ListSnapshots", map[string]string{"Name": FakeSnapshotName}).Return(FakeSnapshotListEmpty, "", nil) osmock.On("WaitSnapshotReady", FakeSnapshotID).Return(nil) @@ -490,7 +479,6 @@ func TestCreateSnapshot(t *testing.T) { // Test CreateSnapshot with extra metadata func TestCreateSnapshotWithExtraMetadata(t *testing.T) { - properties := map[string]string{ "cinder.csi.openstack.org/cluster": FakeCluster, "csi.storage.k8s.io/volumesnapshot/name": FakeSnapshotName, @@ -532,7 +520,6 @@ func TestCreateSnapshotWithExtraMetadata(t *testing.T) { // Test DeleteSnapshot func TestDeleteSnapshot(t *testing.T) { - // DeleteSnapshot(volumeID string) error osmock.On("DeleteSnapshot", FakeSnapshotID).Return(nil) @@ -558,7 +545,6 @@ func TestDeleteSnapshot(t *testing.T) { } func TestListSnapshots(t *testing.T) { - osmock.On("ListSnapshots", map[string]string{"Limit": "1", "Marker": FakeVolID, "Status": "available"}).Return(FakeSnapshotsRes, "", nil) assert := assert.New(t) @@ -574,7 +560,6 @@ func TestListSnapshots(t *testing.T) { } func TestControllerExpandVolume(t *testing.T) { - tState := []string{"available", "in-use"} // ExpandVolume(volumeID string, status string, size int) osmock.On("ExpandVolume", FakeVolID, openstack.VolumeAvailableStatus, 5).Return(nil) @@ -611,7 +596,6 @@ func TestControllerExpandVolume(t *testing.T) { } func TestValidateVolumeCapabilities(t *testing.T) { - // GetVolume(volumeID string) osmock.On("GetVolume", FakeVolID).Return(FakeVol1) diff --git a/pkg/csi/cinder/driver.go b/pkg/csi/cinder/driver.go index a51cb9d06a..bd75fbce9d 100644 --- a/pkg/csi/cinder/driver.go +++ b/pkg/csi/cinder/driver.go @@ -70,13 +70,17 @@ type Driver struct { nscap []*csi.NodeServiceCapability } -func NewDriver(endpoint, cluster string) *Driver { +type DriverOpts struct { + ClusterID string + Endpoint string +} +func NewDriver(o *DriverOpts) *Driver { d := &Driver{} d.name = driverName d.fqVersion = fmt.Sprintf("%s@%s", Version, version.Version) - d.endpoint = endpoint - d.cluster = cluster + d.endpoint = o.Endpoint + d.cluster = o.ClusterID klog.Info("Driver: ", d.name) klog.Info("Driver version: ", d.fqVersion) @@ -108,6 +112,8 @@ func NewDriver(endpoint, cluster string) *Driver { csi.NodeServiceCapability_RPC_GET_VOLUME_STATS, }) + d.ids = NewIdentityServer(d) + return d } @@ -166,15 +172,20 @@ func (d *Driver) GetVolumeCapabilityAccessModes() []*csi.VolumeCapability_Access return d.vcap } -func (d *Driver) SetupDriver(cloud openstack.IOpenStack, mount mount.IMount, metadata metadata.IMetadata) { - - d.ids = 
NewIdentityServer(d) +func (d *Driver) SetupControllerService(cloud openstack.IOpenStack) { + klog.Info("Providing controller service") d.cs = NewControllerServer(d, cloud) - d.ns = NewNodeServer(d, mount, metadata, cloud) +} +func (d *Driver) SetupNodeService(cloud openstack.IOpenStack, mount mount.IMount, metadata metadata.IMetadata) { + klog.Info("Providing node service") + d.ns = NewNodeServer(d, mount, metadata, cloud) } func (d *Driver) Run() { + if nil == d.cs && nil == d.ns { + klog.Fatal("No CSI services initialized") + } - RunControllerandNodePublishServer(d.endpoint, d.ids, d.cs, d.ns) + RunServicesInitialized(d.endpoint, d.ids, d.cs, d.ns) } diff --git a/pkg/csi/cinder/driver_test.go b/pkg/csi/cinder/driver_test.go index 511556c3f7..3f37ab1416 100644 --- a/pkg/csi/cinder/driver_test.go +++ b/pkg/csi/cinder/driver_test.go @@ -29,10 +29,11 @@ var ( func NewFakeDriver() *Driver { - driver := NewDriver(FakeEndpoint, FakeCluster) + driver := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster}) return driver } + func TestValidateControllerServiceRequest(t *testing.T) { d := NewFakeDriver() diff --git a/pkg/csi/cinder/identityserver_test.go b/pkg/csi/cinder/identityserver_test.go index ce34ebca22..37a266addb 100644 --- a/pkg/csi/cinder/identityserver_test.go +++ b/pkg/csi/cinder/identityserver_test.go @@ -26,7 +26,7 @@ import ( ) func TestGetPluginInfo(t *testing.T) { - d := NewDriver(FakeEndpoint, FakeCluster) + d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster}) ids := NewIdentityServer(d) diff --git a/pkg/csi/cinder/nodeserver_test.go b/pkg/csi/cinder/nodeserver_test.go index d6762dc788..8e67d6f70c 100644 --- a/pkg/csi/cinder/nodeserver_test.go +++ b/pkg/csi/cinder/nodeserver_test.go @@ -38,7 +38,7 @@ var omock *openstack.OpenStackMock func init() { if fakeNs == nil { - d := NewDriver(FakeEndpoint, FakeCluster) + d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster}) // mock MountMock mmock = new(mount.MountMock) @@ -142,7 +142,7 @@ func TestNodePublishVolumeEphermeral(t *testing.T) { metadata.MetadataService = metamock openstack.OsInstance = omock - d := NewDriver(FakeEndpoint, FakeCluster) + d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster}) fakeNse := NewNodeServer(d, mount.MInstance, metadata.MetadataService, openstack.OsInstance) // Init assert @@ -281,7 +281,6 @@ func TestNodeUnpublishVolume(t *testing.T) { } func TestNodeUnpublishVolumeEphermeral(t *testing.T) { - mount.MInstance = mmock metadata.MetadataService = metamock openstack.OsInstance = omock @@ -293,7 +292,7 @@ func TestNodeUnpublishVolumeEphermeral(t *testing.T) { omock.On("WaitDiskDetached", FakeNodeID, FakeVolID).Return(nil) omock.On("DeleteVolume", FakeVolID).Return(nil) - d := NewDriver(FakeEndpoint, FakeCluster) + d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster}) fakeNse := NewNodeServer(d, mount.MInstance, metadata.MetadataService, openstack.OsInstance) // Init assert diff --git a/pkg/csi/cinder/utils.go b/pkg/csi/cinder/utils.go index 7e3e925c98..5758065cde 100644 --- a/pkg/csi/cinder/utils.go +++ b/pkg/csi/cinder/utils.go @@ -68,8 +68,7 @@ func NewNodeServer(d *Driver, mount mount.IMount, metadata metadata.IMetadata, c //revive:enable:unexported-return -func RunControllerandNodePublishServer(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) { - +func RunServicesInitialized(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) { s 
:= NewNonBlockingGRPCServer() s.Start(endpoint, ids, cs, ns) s.Wait() diff --git a/pkg/csi/manila/driver.go b/pkg/csi/manila/driver.go index 5783a23bb9..80e3d849aa 100644 --- a/pkg/csi/manila/driver.go +++ b/pkg/csi/manila/driver.go @@ -99,7 +99,6 @@ func argNotEmpty(val, name string) error { func NewDriver(o *DriverOpts) (*Driver, error) { m := map[string]string{ - "node ID": o.NodeID, "driver name": o.DriverName, "driver endpoint": o.ServerCSIEndpoint, "FWD endpoint": o.FwdCSIEndpoint, @@ -151,6 +150,14 @@ func NewDriver(o *DriverOpts) (*Driver, error) { d.serverEndpoint = endpointAddress(serverProto, serverAddr) d.fwdEndpoint = endpointAddress(fwdProto, fwdAddr) + d.ids = &identityServer{d: d} + + return d, nil +} + +func (d *Driver) SetupControllerService() error { + klog.Info("Providing controller service") + d.addControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{ csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT, @@ -165,11 +172,22 @@ func NewDriver(o *DriverOpts) (*Driver, error) { csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY, }) + d.cs = &controllerServer{d: d} + return nil +} + +func (d *Driver) SetupNodeService() error { + if err := argNotEmpty(d.nodeID, "node ID"); err != nil { + return err + } + + klog.Info("Providing node service") + var supportsNodeStage bool nodeCapsMap, err := d.initProxiedDriver() if err != nil { - return nil, fmt.Errorf("failed to initialize proxied CSI driver: %v", err) + return fmt.Errorf("failed to initialize proxied CSI driver: %v", err) } nscaps := make([]csi.NodeServiceCapability_RPC_Type, 0, len(nodeCapsMap)) for c := range nodeCapsMap { @@ -182,14 +200,15 @@ func NewDriver(o *DriverOpts) (*Driver, error) { d.addNodeServiceCapabilities(nscaps) - d.ids = &identityServer{d: d} - d.cs = &controllerServer{d: d} d.ns = &nodeServer{d: d, supportsNodeStage: supportsNodeStage, nodeStageCache: make(map[volumeID]stageCacheEntry)} - - return d, nil + return nil } func (d *Driver) Run() { + if nil == d.cs && nil == d.ns { + klog.Fatal("No CSI services initialized") + } + s := nonBlockingGRPCServer{} s.start(d.serverEndpoint, d.ids, d.cs, d.ns) s.wait() @@ -319,9 +338,15 @@ func (s *nonBlockingGRPCServer) serve(endpoint string, ids *identityServer, cs * s.server = server - csi.RegisterIdentityServer(server, ids) - csi.RegisterControllerServer(server, cs) - csi.RegisterNodeServer(server, ns) + if ids != nil { + csi.RegisterIdentityServer(server, ids) + } + if cs != nil { + csi.RegisterControllerServer(server, cs) + } + if ns != nil { + csi.RegisterNodeServer(server, ns) + } klog.Infof("listening for connections on %#v", listener.Addr()) diff --git a/tests/sanity/cinder/sanity_test.go b/tests/sanity/cinder/sanity_test.go index a3d13d33fc..4e3a2abd65 100644 --- a/tests/sanity/cinder/sanity_test.go +++ b/tests/sanity/cinder/sanity_test.go @@ -19,14 +19,16 @@ func TestDriver(t *testing.T) { endpoint := "unix://" + socket cluster := "kubernetes" - d := cinder.NewDriver(endpoint, cluster) + d := cinder.NewDriver(&cinder.DriverOpts{Endpoint: endpoint, ClusterID: cluster}) + fakecloudprovider := getfakecloud() openstack.OsInstance = fakecloudprovider fakemnt := GetFakeMountProvider() fakemet := &fakemetadata{} - d.SetupDriver(fakecloudprovider, fakemnt, fakemet) + d.SetupControllerService(fakecloudprovider) + d.SetupNodeService(fakecloudprovider, fakemnt, fakemet) // TODO: Stop call diff --git a/tests/sanity/manila/sanity_test.go b/tests/sanity/manila/sanity_test.go 
index 8fb7b17b8b..8a9d672f85 100644 --- a/tests/sanity/manila/sanity_test.go +++ b/tests/sanity/manila/sanity_test.go @@ -43,10 +43,20 @@ func TestDriver(t *testing.T) { FwdCSIEndpoint: fwdEndpoint, ManilaClientBuilder: &fakeManilaClientBuilder{}, CSIClientBuilder: &fakeCSIClientBuilder{}, - }) + }, + ) + if err != nil { + t.Fatalf("Failed to initialize CSI Manila driver: %v", err) + } + + err = d.SetupControllerService() + if err != nil { + t.Fatalf("Failed to initialize CSI Manila controller service: %v", err) + } + err = d.SetupNodeService() if err != nil { - t.Fatalf("failed to initialize CSI Manila driver: %v", err) + t.Fatalf("Failed to initialize CSI Manila node service: %v", err) } go d.Run() From 6eda64925d8b45f7f161a1727ddea5de9cec92b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martin=20Andr=C3=A9?= Date: Thu, 8 Feb 2024 10:11:45 +0100 Subject: [PATCH 05/23] [ci] Bump golangci-lint for go 1.22 (#2544) The test image was recently changed and now ships with go1.22rc2. We need to use a version of golangci-lint that is compatible with it. --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index e3db00d232..d055a45f26 100644 --- a/Makefile +++ b/Makefile @@ -81,7 +81,7 @@ $(BUILD_CMDS): $(SOURCES) test: unit functional check: work - go run github.com/golangci/golangci-lint/cmd/golangci-lint@v1.54.2 run ./... + go run github.com/golangci/golangci-lint/cmd/golangci-lint@v1.56.0 run ./... unit: work go test -tags=unit $(shell go list ./... | sed -e '/sanity/ { N; d; }' | sed -e '/tests/ {N; d;}') $(TESTARGS) From e87f50614b96a6af6095660aa6a77457223b344b Mon Sep 17 00:00:00 2001 From: Sebastian Rojas <64033052+Sebastian-RG@users.noreply.github.com> Date: Thu, 8 Feb 2024 16:00:57 -0500 Subject: [PATCH 06/23] cinder-csi: Adds support for managing backups (#2473) (#2480) Signed-off-by: Sebastian-RG --- .../using-cinder-csi-plugin.md | 4 +- pkg/csi/cinder/controllerserver.go | 327 +++++++++++++++--- pkg/csi/cinder/controllerserver_test.go | 31 +- pkg/csi/cinder/fake.go | 3 + pkg/csi/cinder/nodeserver.go | 2 +- pkg/csi/cinder/nodeserver_test.go | 2 +- pkg/csi/cinder/openstack/openstack.go | 13 +- pkg/csi/cinder/openstack/openstack_backups.go | 221 ++++++++++++ pkg/csi/cinder/openstack/openstack_mock.go | 133 ++++++- .../cinder/openstack/openstack_snapshots.go | 21 +- pkg/csi/cinder/openstack/openstack_volumes.go | 20 +- tests/sanity/cinder/fakecloud.go | 83 ++++- 12 files changed, 758 insertions(+), 102 deletions(-) create mode 100644 pkg/csi/cinder/openstack/openstack_backups.go diff --git a/docs/cinder-csi-plugin/using-cinder-csi-plugin.md b/docs/cinder-csi-plugin/using-cinder-csi-plugin.md index c3a3656ffd..20638ea8a0 100644 --- a/docs/cinder-csi-plugin/using-cinder-csi-plugin.md +++ b/docs/cinder-csi-plugin/using-cinder-csi-plugin.md @@ -267,7 +267,9 @@ helm install --namespace kube-system --name cinder-csi ./charts/cinder-csi-plugi | StorageClass `parameters` | `availability` | `nova` | String. Volume Availability Zone | | StorageClass `parameters` | `type` | Empty String | String. Name/ID of Volume type. Corresponding volume type should exist in cinder | | VolumeSnapshotClass `parameters` | `force-create` | `false` | Enable to support creating snapshot for a volume in in-use status | -| Inline Volume `volumeAttributes` | `capacity` | `1Gi` | volume size for creating inline volumes| +| VolumeSnapshotClass `parameters` | `type` | Empty String | `snapshot` creates a VolumeSnapshot object linked to a Cinder volume snapshot. 
`backup` creates a VolumeSnapshot object linked to a cinder volume backup. Defaults to `snapshot` if not defined | +| VolumeSnapshotClass `parameters` | `backup-max-duration-seconds-per-gb` | `20` | Defines the amount of time to wait for a backup to complete in seconds per GB of volume size | +| Inline Volume `volumeAttributes` | `capacity` | `1Gi` | volume size for creating inline volumes| | Inline Volume `VolumeAttributes` | `type` | Empty String | Name/ID of Volume type. Corresponding volume type should exist in cinder | ## Local Development diff --git a/pkg/csi/cinder/controllerserver.go b/pkg/csi/cinder/controllerserver.go index 110d3e3ec8..4768705ab8 100644 --- a/pkg/csi/cinder/controllerserver.go +++ b/pkg/csi/cinder/controllerserver.go @@ -17,9 +17,11 @@ limitations under the License. package cinder import ( + "fmt" "strconv" "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/backups" "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots" "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes" "github.com/kubernetes-csi/csi-lib-utils/protosanitizer" @@ -110,36 +112,71 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol } content := req.GetVolumeContentSource() var snapshotID string - var sourcevolID string + var sourceVolID string + var sourceBackupID string + var backupsAreEnabled bool + backupsAreEnabled, err = cloud.BackupsAreEnabled() + klog.V(4).Infof("Backups enabled: %v", backupsAreEnabled) + if err != nil { + klog.Errorf("Failed to check if backups are enabled: %v", err) + } if content != nil && content.GetSnapshot() != nil { snapshotID = content.GetSnapshot().GetSnapshotId() - _, err := cloud.GetSnapshotByID(snapshotID) - if err != nil { - if cpoerrors.IsNotFound(err) { - return nil, status.Errorf(codes.NotFound, "VolumeContentSource Snapshot %s not found", snapshotID) + + snap, err := cloud.GetSnapshotByID(snapshotID) + if err != nil && !cpoerrors.IsNotFound(err) { + return nil, err + } + // If the snapshot exists but is not yet available, fail. + if err == nil && snap.Status != "available" { + return nil, status.Errorf(codes.Unavailable, "VolumeContentSource Snapshot %s is not yet available. status: %s", snapshotID, snap.Status) + } + + // In case a snapshot is not found + // check if a Backup with the same ID exists + if backupsAreEnabled && cpoerrors.IsNotFound(err) { + back, err := cloud.GetBackupByID(snapshotID) + if err != nil { + //If there is an error getting the backup as well, fail. + return nil, status.Errorf(codes.NotFound, "VolumeContentSource Snapshot or Backup with ID %s not found", snapshotID) + } + if back.Status != "available" { + // If the backup exists but is not yet available, fail. + return nil, status.Errorf(codes.Unavailable, "VolumeContentSource Backup %s is not yet available. 
status: %s", snapshotID, back.Status) } - return nil, status.Errorf(codes.Internal, "Failed to retrieve the snapshot %s: %v", snapshotID, err) + // If an available backup is found, create the volume from the backup + sourceBackupID = snapshotID + snapshotID = "" } + // In case GetSnapshotByID has error IsNotFound and backups are not enabled + // TODO: Change 'snapshotID == ""' to '!backupsAreEnabled' when cloud.BackupsAreEnabled() is correctly implemented + if cpoerrors.IsNotFound(err) && snapshotID == "" { + return nil, err + } + } if content != nil && content.GetVolume() != nil { - sourcevolID = content.GetVolume().GetVolumeId() - _, err := cloud.GetVolume(sourcevolID) + sourceVolID = content.GetVolume().GetVolumeId() + _, err := cloud.GetVolume(sourceVolID) if err != nil { if cpoerrors.IsNotFound(err) { - return nil, status.Errorf(codes.NotFound, "Source Volume %s not found", sourcevolID) + return nil, status.Errorf(codes.NotFound, "Source Volume %s not found", sourceVolID) } - return nil, status.Errorf(codes.Internal, "Failed to retrieve the source volume %s: %v", sourcevolID, err) + return nil, status.Errorf(codes.Internal, "Failed to retrieve the source volume %s: %v", sourceVolID, err) } } - vol, err := cloud.CreateVolume(volName, volSizeGB, volType, volAvailability, snapshotID, sourcevolID, &properties) + vol, err := cloud.CreateVolume(volName, volSizeGB, volType, volAvailability, snapshotID, sourceVolID, sourceBackupID, properties) + // When creating a volume from a backup, the response does not include the backupID. + if sourceBackupID != "" { + vol.BackupID = &sourceBackupID + } if err != nil { klog.Errorf("Failed to CreateVolume: %v", err) return nil, status.Errorf(codes.Internal, "CreateVolume failed with error %v", err) - } klog.V(4).Infof("CreateVolume: Successfully created volume %s in Availability Zone: %s of size %d GiB", vol.ID, vol.AvailabilityZone, vol.Size) @@ -326,6 +363,25 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS name := req.Name volumeID := req.GetSourceVolumeId() + snapshotType := req.Parameters[openstack.SnapshotType] + filters := map[string]string{"Name": name} + backupMaxDurationSecondsPerGB := openstack.BackupMaxDurationSecondsPerGBDefault + + // Current time, used for CreatedAt + var ctime *timestamppb.Timestamp + // Size of the created snapshot, used to calculate the amount of time to wait for the backup to finish + var snapSize int + // If true, skips creating a snapshot because a backup already exists + var backupAlreadyExists bool + var snap *snapshots.Snapshot + var backup *backups.Backup + var backups []backups.Backup + var err error + + // Set snapshot type to 'snapshot' by default + if snapshotType == "" { + snapshotType = "snapshot" + } if name == "" { return nil, status.Error(codes.InvalidArgument, "Snapshot name must be provided in CreateSnapshot request") @@ -335,73 +391,216 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS return nil, status.Error(codes.InvalidArgument, "VolumeID must be provided in CreateSnapshot request") } - // Verify a snapshot with the provided name doesn't already exist for this tenant - var snap *snapshots.Snapshot + // Verify snapshot type has a valid value + if snapshotType != "snapshot" && snapshotType != "backup" { + return nil, status.Error(codes.InvalidArgument, "Snapshot type must be 'backup', 'snapshot' or not defined") + } + var backupsAreEnabled bool + backupsAreEnabled, err = cs.Cloud.BackupsAreEnabled() + klog.V(4).Infof("Backups 
enabled: %v", backupsAreEnabled) + if err != nil { + klog.Errorf("Failed to check if backups are enabled: %v", err) + } + + // Prechecks in case of a backup + if snapshotType == "backup" { + if !backupsAreEnabled { + return nil, status.Error(codes.FailedPrecondition, "Backups are not enabled in Cinder") + } + // Get a list of backups with the provided name + backups, err = cs.Cloud.ListBackups(filters) + if err != nil { + klog.Errorf("Failed to query for existing Backup during CreateSnapshot: %v", err) + return nil, status.Error(codes.Internal, "Failed to get backups") + } + // If more than one backup with the provided name exists, fail + if len(backups) > 1 { + klog.Errorf("found multiple existing backups with selected name (%s) during create", name) + return nil, status.Error(codes.Internal, "Multiple backups reported by Cinder with same name") + } + + if len(backups) == 1 { + backup = &backups[0] + + // Verify the existing backup has the same VolumeID, otherwise it belongs to another volume + if backup.VolumeID != volumeID { + return nil, status.Error(codes.AlreadyExists, "Backup with given name already exists, with different source volume ID") + } + + // If a backup of the volume already exists, skip creating the snapshot + backupAlreadyExists = true + klog.V(3).Infof("Found existing backup %s from volume with ID: %s", name, volumeID) + } + + // Get the max duration to wait in seconds per GB of snapshot and fail if parsing fails + if item, ok := (req.Parameters)[openstack.BackupMaxDurationPerGB]; ok { + backupMaxDurationSecondsPerGB, err = strconv.Atoi(item) + if err != nil { + klog.Errorf("Setting backup-max-duration-seconds-per-gb failed due to a parsing error: %v", err) + return nil, status.Error(codes.Internal, "Failed to parse backup-max-duration-seconds-per-gb") + } + } + } + + // Create the snapshot if the backup does not already exist and wait for it to be ready + if !backupAlreadyExists { + snap, err = cs.createSnapshot(name, volumeID, req.Parameters) + if err != nil { + return nil, err + } + + ctime = timestamppb.New(snap.CreatedAt) + if err = ctime.CheckValid(); err != nil { + klog.Errorf("Error to convert time to timestamp: %v", err) + } + + snap.Status, err = cs.Cloud.WaitSnapshotReady(snap.ID) + if err != nil { + klog.Errorf("Failed to WaitSnapshotReady: %v", err) + return nil, status.Errorf(codes.Internal, "CreateSnapshot failed with error: %v. Current snapshot status: %v", err, snap.Status) + } + + snapSize = snap.Size + } + + if snapshotType == "snapshot" { + return &csi.CreateSnapshotResponse{ + Snapshot: &csi.Snapshot{ + SnapshotId: snap.ID, + SizeBytes: int64(snap.Size * 1024 * 1024 * 1024), + SourceVolumeId: snap.VolumeID, + CreationTime: ctime, + ReadyToUse: true, + }, + }, nil + } + + // If snapshotType is 'backup', create a backup from the snapshot and delete the snapshot. + if snapshotType == "backup" { + + if !backupAlreadyExists { + backup, err = cs.createBackup(name, volumeID, snap, req.Parameters) + if err != nil { + return nil, err + } + } + + ctime = timestamppb.New(backup.CreatedAt) + if err := ctime.CheckValid(); err != nil { + klog.Errorf("Error to convert time to timestamp: %v", err) + } + + backup.Status, err = cs.Cloud.WaitBackupReady(backup.ID, snapSize, backupMaxDurationSecondsPerGB) + if err != nil { + klog.Errorf("Failed to WaitBackupReady: %v", err) + return nil, status.Error(codes.Internal, fmt.Sprintf("CreateBackup failed with error %v. 
Current backups status: %s", err, backup.Status)) + } + + // Necessary to get all the backup information, including size. + backup, err = cs.Cloud.GetBackupByID(backup.ID) + if err != nil { + klog.Errorf("Failed to GetBackupByID after backup creation: %v", err) + return nil, status.Error(codes.Internal, fmt.Sprintf("GetBackupByID failed with error %v", err)) + } + + err = cs.Cloud.DeleteSnapshot(snap.ID) + if err != nil && !cpoerrors.IsNotFound(err) { + klog.Errorf("Failed to DeleteSnapshot: %v", err) + return nil, status.Error(codes.Internal, fmt.Sprintf("DeleteSnapshot failed with error %v", err)) + } + } + + return &csi.CreateSnapshotResponse{ + Snapshot: &csi.Snapshot{ + SnapshotId: backup.ID, + SizeBytes: int64(backup.Size * 1024 * 1024 * 1024), + SourceVolumeId: backup.VolumeID, + CreationTime: ctime, + ReadyToUse: true, + }, + }, nil + +} + +func (cs *controllerServer) createSnapshot(name string, volumeID string, parameters map[string]string) (snap *snapshots.Snapshot, err error) { + filters := map[string]string{} filters["Name"] = name + + // List existing snapshots with the same name snapshots, _, err := cs.Cloud.ListSnapshots(filters) if err != nil { klog.Errorf("Failed to query for existing Snapshot during CreateSnapshot: %v", err) return nil, status.Error(codes.Internal, "Failed to get snapshots") } + // If more than one snapshot with the provided name exists, fail + if len(snapshots) > 1 { + klog.Errorf("found multiple existing snapshots with selected name (%s) during create", name) + + return nil, status.Error(codes.Internal, "Multiple snapshots reported by Cinder with same name") + } + + // Verify a snapshot with the provided name doesn't already exist for this tenant if len(snapshots) == 1 { snap = &snapshots[0] - if snap.VolumeID != volumeID { return nil, status.Error(codes.AlreadyExists, "Snapshot with given name already exists, with different source volume ID") } + // If the snapshot for the correct volume already exists, return it klog.V(3).Infof("Found existing snapshot %s from volume with ID: %s", name, volumeID) + return snap, nil + } - } else if len(snapshots) > 1 { - klog.Errorf("found multiple existing snapshots with selected name (%s) during create", name) - return nil, status.Error(codes.Internal, "Multiple snapshots reported by Cinder with same name") - - } else { - // Add cluster ID to the snapshot metadata - properties := map[string]string{cinderCSIClusterIDKey: cs.Driver.cluster} - - // see https://github.com/kubernetes-csi/external-snapshotter/pull/375/ - // Also, we don't want to tag every param but we still want to send the - // 'force-create' flag to openstack layer so that we will honor the - // force create functions - for _, mKey := range []string{"csi.storage.k8s.io/volumesnapshot/name", "csi.storage.k8s.io/volumesnapshot/namespace", "csi.storage.k8s.io/volumesnapshotcontent/name", openstack.SnapshotForceCreate} { - if v, ok := req.Parameters[mKey]; ok { - properties[mKey] = v - } - } + // Add cluster ID to the snapshot metadata + properties := map[string]string{cinderCSIClusterIDKey: cs.Driver.cluster} - // TODO: Delegate the check to openstack itself and ignore the conflict - snap, err = cs.Cloud.CreateSnapshot(name, volumeID, &properties) - if err != nil { - klog.Errorf("Failed to Create snapshot: %v", err) - return nil, status.Errorf(codes.Internal, "CreateSnapshot failed with error %v", err) + // see https://github.com/kubernetes-csi/external-snapshotter/pull/375/ + // Also, we don't want to tag every param but we still want to send the + // 
'force-create' flag to openstack layer so that we will honor the + // force create functions + for _, mKey := range []string{"csi.storage.k8s.io/volumesnapshot/name", "csi.storage.k8s.io/volumesnapshot/namespace", "csi.storage.k8s.io/volumesnapshotcontent/name", openstack.SnapshotForceCreate} { + if v, ok := parameters[mKey]; ok { + properties[mKey] = v } + } - klog.V(3).Infof("CreateSnapshot %s from volume with ID: %s", name, volumeID) + // TODO: Delegate the check to openstack itself and ignore the conflict + snap, err = cs.Cloud.CreateSnapshot(name, volumeID, properties) + if err != nil { + klog.Errorf("Failed to Create snapshot: %v", err) + return nil, status.Errorf(codes.Internal, "CreateSnapshot failed with error %v", err) } - ctime := timestamppb.New(snap.CreatedAt) - if err := ctime.CheckValid(); err != nil { - klog.Errorf("Error to convert time to timestamp: %v", err) + klog.V(3).Infof("CreateSnapshot %s from volume with ID: %s", name, volumeID) + + return snap, nil +} + +func (cs *controllerServer) createBackup(name string, volumeID string, snap *snapshots.Snapshot, parameters map[string]string) (*backups.Backup, error) { + + // Add cluster ID to the snapshot metadata + properties := map[string]string{cinderCSIClusterIDKey: cs.Driver.cluster} + + // see https://github.com/kubernetes-csi/external-snapshotter/pull/375/ + // Also, we don't want to tag every param but we still want to send the + // 'force-create' flag to openstack layer so that we will honor the + // force create functions + for _, mKey := range []string{"csi.storage.k8s.io/volumesnapshot/name", "csi.storage.k8s.io/volumesnapshot/namespace", "csi.storage.k8s.io/volumesnapshotcontent/name", openstack.SnapshotForceCreate, openstack.SnapshotType} { + if v, ok := parameters[mKey]; ok { + properties[mKey] = v + } } - err = cs.Cloud.WaitSnapshotReady(snap.ID) + backup, err := cs.Cloud.CreateBackup(name, volumeID, snap.ID, properties) if err != nil { - klog.Errorf("Failed to WaitSnapshotReady: %v", err) - return nil, status.Errorf(codes.Internal, "CreateSnapshot failed with error %v", err) + klog.Errorf("Failed to Create backup: %v", err) + return nil, status.Error(codes.Internal, fmt.Sprintf("CreateBackup failed with error %v", err)) } + klog.V(4).Infof("Backup created: %+v", backup) - return &csi.CreateSnapshotResponse{ - Snapshot: &csi.Snapshot{ - SnapshotId: snap.ID, - SizeBytes: int64(snap.Size * 1024 * 1024 * 1024), - SourceVolumeId: snap.VolumeID, - CreationTime: ctime, - ReadyToUse: true, - }, - }, nil + return backup, nil } func (cs *controllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) { @@ -413,8 +612,18 @@ func (cs *controllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS return nil, status.Error(codes.InvalidArgument, "Snapshot ID must be provided in DeleteSnapshot request") } + // If volumeSnapshot object was linked to a cinder backup, delete the backup. 
+ back, err := cs.Cloud.GetBackupByID(id) + if err == nil && back != nil { + err = cs.Cloud.DeleteBackup(id) + if err != nil { + klog.Errorf("Failed to Delete backup: %v", err) + return nil, status.Error(codes.Internal, fmt.Sprintf("DeleteBackup failed with error %v", err)) + } + } + // Delegate the check to openstack itself - err := cs.Cloud.DeleteSnapshot(id) + err = cs.Cloud.DeleteSnapshot(id) if err != nil { if cpoerrors.IsNotFound(err) { klog.V(3).Infof("Snapshot %s is already deleted.", id) @@ -675,6 +884,16 @@ func getCreateVolumeResponse(vol *volumes.Volume, ignoreVolumeAZ bool, accessibl } } + if vol.BackupID != nil && *vol.BackupID != "" { + volsrc = &csi.VolumeContentSource{ + Type: &csi.VolumeContentSource_Snapshot{ + Snapshot: &csi.VolumeContentSource_SnapshotSource{ + SnapshotId: *vol.BackupID, + }, + }, + } + } + var accessibleTopology []*csi.Topology // If ignore-volume-az is true , dont set the accessible topology to volume az, // use from preferred topologies instead. diff --git a/pkg/csi/cinder/controllerserver_test.go b/pkg/csi/cinder/controllerserver_test.go index b0a712cca2..f8d7bcd541 100644 --- a/pkg/csi/cinder/controllerserver_test.go +++ b/pkg/csi/cinder/controllerserver_test.go @@ -44,8 +44,8 @@ func init() { func TestCreateVolume(t *testing.T) { // mock OpenStack properties := map[string]string{"cinder.csi.openstack.org/cluster": FakeCluster} - // CreateVolume(name string, size int, vtype, availability string, snapshotID string, tags *map[string]string) (string, string, int, error) - osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), FakeVolType, FakeAvailability, "", "", &properties).Return(&FakeVol, nil) + // CreateVolume(name string, size int, vtype, availability string, snapshotID string, sourceVolID string, sourceBackupID string, tags map[string]string) (string, string, int, error) + osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), FakeVolType, FakeAvailability, "", "", "", properties).Return(&FakeVol, nil) osmock.On("GetVolumesByName", FakeVolName).Return(FakeVolListEmpty, nil) // Init assert @@ -90,9 +90,9 @@ func TestCreateVolume(t *testing.T) { func TestCreateVolumeWithParam(t *testing.T) { // mock OpenStack properties := map[string]string{"cinder.csi.openstack.org/cluster": FakeCluster} - // CreateVolume(name string, size int, vtype, availability string, snapshotID string, tags *map[string]string) (string, string, int, error) + // CreateVolume(name string, size int, vtype, availability string, snapshotID string, sourceVolID string, sourceBackupID string, tags map[string]string) (string, string, int, error) // Vol type and availability comes from CreateVolumeRequest.Parameters - osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), "dummyVolType", "cinder", "", "", &properties).Return(&FakeVol, nil) + osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), "dummyVolType", "cinder", "", "", "", properties).Return(&FakeVol, nil) osmock.On("GetVolumesByName", FakeVolName).Return(FakeVolListEmpty, nil) // Init assert @@ -146,8 +146,8 @@ func TestCreateVolumeWithExtraMetadata(t *testing.T) { "csi.storage.k8s.io/pvc/name": FakePVCName, "csi.storage.k8s.io/pvc/namespace": FakePVCNamespace, } - // CreateVolume(name string, size int, vtype, availability string, snapshotID string, tags *map[string]string) (string, string, int, error) - osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), FakeVolType, FakeAvailability, "", "", &properties).Return(&FakeVol, nil) + // CreateVolume(name string, 
size int, vtype, availability string, snapshotID string, sourceVolID string, sourceBackupID string, tags map[string]string) (string, string, int, error) + osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), FakeVolType, FakeAvailability, "", "", "", properties).Return(&FakeVol, nil) osmock.On("GetVolumesByName", FakeVolName).Return(FakeVolListEmpty, nil) @@ -186,8 +186,8 @@ func TestCreateVolumeWithExtraMetadata(t *testing.T) { func TestCreateVolumeFromSnapshot(t *testing.T) { properties := map[string]string{"cinder.csi.openstack.org/cluster": FakeCluster} - // CreateVolume(name string, size int, vtype, availability string, snapshotID string, tags *map[string]string) (string, string, int, error) - osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), FakeVolType, "", FakeSnapshotID, "", &properties).Return(&FakeVolFromSnapshot, nil) + // CreateVolume(name string, size int, vtype, availability string, snapshotID string, sourceVolID string, sourceBackupID string, tags map[string]string) (string, string, int, error) + osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), FakeVolType, "", FakeSnapshotID, "", "", properties).Return(&FakeVolFromSnapshot, nil) osmock.On("GetVolumesByName", FakeVolName).Return(FakeVolListEmpty, nil) // Init assert @@ -233,8 +233,8 @@ func TestCreateVolumeFromSnapshot(t *testing.T) { func TestCreateVolumeFromSourceVolume(t *testing.T) { properties := map[string]string{"cinder.csi.openstack.org/cluster": FakeCluster} - // CreateVolume(name string, size int, vtype, availability string, snapshotID string, tags *map[string]string) (string, string, int, error) - osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), FakeVolType, "", "", FakeVolID, &properties).Return(&FakeVolFromSourceVolume, nil) + // CreateVolume(name string, size int, vtype, availability string, snapshotID string, sourceVolID string, sourceBackupID string, tags map[string]string) (string, string, int, error) + osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), FakeVolType, "", "", FakeVolID, "", properties).Return(&FakeVolFromSourceVolume, nil) osmock.On("GetVolumesByName", FakeVolName).Return(FakeVolListEmpty, nil) // Init assert @@ -451,10 +451,12 @@ func TestListVolumes(t *testing.T) { // Test CreateSnapshot func TestCreateSnapshot(t *testing.T) { - osmock.On("CreateSnapshot", FakeSnapshotName, FakeVolID, &map[string]string{cinderCSIClusterIDKey: "cluster"}).Return(&FakeSnapshotRes, nil) - osmock.On("ListSnapshots", map[string]string{"Name": FakeSnapshotName}).Return(FakeSnapshotListEmpty, "", nil) - osmock.On("WaitSnapshotReady", FakeSnapshotID).Return(nil) + osmock.On("CreateSnapshot", FakeSnapshotName, FakeVolID, map[string]string{cinderCSIClusterIDKey: "cluster"}).Return(&FakeSnapshotRes, nil) + osmock.On("ListSnapshots", map[string]string{"Name": FakeSnapshotName}).Return(FakeSnapshotListEmpty, "", nil) + osmock.On("WaitSnapshotReady", FakeSnapshotID).Return(FakeSnapshotRes.Status, nil) + osmock.On("ListBackups", map[string]string{"Name": FakeSnapshotName}).Return(FakeBackupListEmpty, nil) + osmock.On("GetSnapshotByID", FakeVolID).Return(&FakeSnapshotRes, nil) // Init assert assert := assert.New(t) @@ -487,7 +489,7 @@ func TestCreateSnapshotWithExtraMetadata(t *testing.T) { openstack.SnapshotForceCreate: "true", } - osmock.On("CreateSnapshot", FakeSnapshotName, FakeVolID, &properties).Return(&FakeSnapshotRes, nil) + osmock.On("CreateSnapshot", FakeSnapshotName, FakeVolID, properties).Return(&FakeSnapshotRes, nil) 
osmock.On("ListSnapshots", map[string]string{"Name": FakeSnapshotName}).Return(FakeSnapshotListEmpty, "", nil) osmock.On("WaitSnapshotReady", FakeSnapshotID).Return(nil) @@ -522,6 +524,7 @@ func TestCreateSnapshotWithExtraMetadata(t *testing.T) { func TestDeleteSnapshot(t *testing.T) { // DeleteSnapshot(volumeID string) error osmock.On("DeleteSnapshot", FakeSnapshotID).Return(nil) + osmock.On("DeleteBackup", FakeSnapshotID).Return(nil) // Init assert assert := assert.New(t) diff --git a/pkg/csi/cinder/fake.go b/pkg/csi/cinder/fake.go index dfed9220bc..3d3efbe28d 100644 --- a/pkg/csi/cinder/fake.go +++ b/pkg/csi/cinder/fake.go @@ -17,6 +17,7 @@ limitations under the License. package cinder import ( + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/backups" "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots" "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes" "golang.org/x/net/context" @@ -94,6 +95,7 @@ var FakeSnapshotRes = snapshots.Snapshot{ Name: "fake-snapshot", VolumeID: FakeVolID, Size: 1, + Status: "available", } var FakeSnapshotsRes = []snapshots.Snapshot{FakeSnapshotRes} @@ -102,6 +104,7 @@ var FakeVolListMultiple = []volumes.Volume{FakeVol1, FakeVol3} var FakeVolList = []volumes.Volume{FakeVol1} var FakeVolListEmpty = []volumes.Volume{} var FakeSnapshotListEmpty = []snapshots.Snapshot{} +var FakeBackupListEmpty = []backups.Backup{} var FakeInstanceID = "321a8b81-3660-43e5-bab8-6470b65ee4e8" diff --git a/pkg/csi/cinder/nodeserver.go b/pkg/csi/cinder/nodeserver.go index 23058fcef9..3c1faf1fe9 100644 --- a/pkg/csi/cinder/nodeserver.go +++ b/pkg/csi/cinder/nodeserver.go @@ -150,7 +150,7 @@ func nodePublishEphemeral(req *csi.NodePublishVolumeRequest, ns *nodeServer) (*c volumeType = "" } - evol, err := ns.Cloud.CreateVolume(volName, size, volumeType, volAvailability, "", "", &properties) + evol, err := ns.Cloud.CreateVolume(volName, size, volumeType, volAvailability, "", "", "", properties) if err != nil { klog.V(3).Infof("Failed to Create Ephemeral Volume: %v", err) diff --git a/pkg/csi/cinder/nodeserver_test.go b/pkg/csi/cinder/nodeserver_test.go index 8e67d6f70c..dbfeb6144b 100644 --- a/pkg/csi/cinder/nodeserver_test.go +++ b/pkg/csi/cinder/nodeserver_test.go @@ -129,7 +129,7 @@ func TestNodePublishVolumeEphermeral(t *testing.T) { fvolName := fmt.Sprintf("ephemeral-%s", FakeVolID) tState := []string{"available"} - omock.On("CreateVolume", fvolName, 2, "test", "nova", "", "", &properties).Return(&FakeVol, nil) + omock.On("CreateVolume", fvolName, 2, "test", "nova", "", "", "", properties).Return(&FakeVol, nil) omock.On("AttachVolume", FakeNodeID, FakeVolID).Return(FakeVolID, nil) omock.On("WaitDiskAttached", FakeNodeID, FakeVolID).Return(nil) diff --git a/pkg/csi/cinder/openstack/openstack.go b/pkg/csi/cinder/openstack/openstack.go index e3a76a167f..4782e43545 100644 --- a/pkg/csi/cinder/openstack/openstack.go +++ b/pkg/csi/cinder/openstack/openstack.go @@ -23,6 +23,7 @@ import ( "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/backups" "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots" "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes" "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" @@ -44,7 +45,7 @@ func AddExtraFlags(fs *pflag.FlagSet) { } type IOpenStack interface { - CreateVolume(name string, size int, vtype, availability string, snapshotID string, 
sourcevolID string, tags *map[string]string) (*volumes.Volume, error) + CreateVolume(name string, size int, vtype, availability string, snapshotID string, sourceVolID string, sourceBackupID string, tags map[string]string) (*volumes.Volume, error) DeleteVolume(volumeID string) error AttachVolume(instanceID, volumeID string) (string, error) ListVolumes(limit int, startingToken string) ([]volumes.Volume, string, error) @@ -55,11 +56,17 @@ type IOpenStack interface { GetAttachmentDiskPath(instanceID, volumeID string) (string, error) GetVolume(volumeID string) (*volumes.Volume, error) GetVolumesByName(name string) ([]volumes.Volume, error) - CreateSnapshot(name, volID string, tags *map[string]string) (*snapshots.Snapshot, error) + CreateSnapshot(name, volID string, tags map[string]string) (*snapshots.Snapshot, error) ListSnapshots(filters map[string]string) ([]snapshots.Snapshot, string, error) DeleteSnapshot(snapID string) error GetSnapshotByID(snapshotID string) (*snapshots.Snapshot, error) - WaitSnapshotReady(snapshotID string) error + WaitSnapshotReady(snapshotID string) (string, error) + CreateBackup(name, volID string, snapshotID string, tags map[string]string) (*backups.Backup, error) + ListBackups(filters map[string]string) ([]backups.Backup, error) + DeleteBackup(backupID string) error + GetBackupByID(backupID string) (*backups.Backup, error) + BackupsAreEnabled() (bool, error) + WaitBackupReady(backupID string, snapshotSize int, backupMaxDurationSecondsPerGB int) (string, error) GetInstanceByID(instanceID string) (*servers.Server, error) ExpandVolume(volumeID string, status string, size int) error GetMaxVolLimit() int64 diff --git a/pkg/csi/cinder/openstack/openstack_backups.go b/pkg/csi/cinder/openstack/openstack_backups.go new file mode 100644 index 0000000000..8ddc05cff0 --- /dev/null +++ b/pkg/csi/cinder/openstack/openstack_backups.go @@ -0,0 +1,221 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package openstack backups provides an implementation of Cinder Backup features +// cinder functions using Gophercloud. +package openstack + +import ( + "errors" + "fmt" + "strconv" + "time" + + "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/backups" + "golang.org/x/net/context" + "k8s.io/cloud-provider-openstack/pkg/metrics" + "k8s.io/klog/v2" +) + +const ( + backupReadyStatus = "available" + backupErrorStatus = "error" + backupBinary = "cinder-backup" + backupDescription = "Created by OpenStack Cinder CSI driver" + BackupMaxDurationSecondsPerGBDefault = 20 + BackupMaxDurationPerGB = "backup-max-duration-seconds-per-gb" + backupBaseDurationSeconds = 30 + backupReadyCheckIntervalSeconds = 7 +) + +// CreateBackup issues a request to create a Backup from the specified Snapshot with the corresponding ID and +// returns the resultant gophercloud Backup Item upon success. 
+func (os *OpenStack) CreateBackup(name, volID string, snapshotID string, tags map[string]string) (*backups.Backup, error) { + blockstorageServiceClient, err := openstack.NewBlockStorageV3(os.blockstorage.ProviderClient, os.epOpts) + if err != nil { + return &backups.Backup{}, err + } + + force := false + // if no flag given, then force will be false by default + // if flag it given , check it + if item, ok := (tags)[SnapshotForceCreate]; ok { + var err error + force, err = strconv.ParseBool(item) + if err != nil { + klog.V(5).Infof("Make force create flag to false due to: %v", err) + } + delete(tags, SnapshotForceCreate) + } + + opts := &backups.CreateOpts{ + VolumeID: volID, + SnapshotID: snapshotID, + Name: name, + Force: force, + Description: backupDescription, + } + + if tags != nil { + // Set openstack microversion to 3.43 to send metadata along with the backup + blockstorageServiceClient.Microversion = "3.43" + opts.Metadata = tags + } + + // TODO: Do some check before really call openstack API on the input + mc := metrics.NewMetricContext("backup", "create") + backup, err := backups.Create(blockstorageServiceClient, opts).Extract() + if mc.ObserveRequest(err) != nil { + return &backups.Backup{}, err + } + // There's little value in rewrapping these gophercloud types into yet another abstraction/type, instead just + // return the gophercloud item + return backup, nil +} + +// ListBackups retrieves a list of active backups from Cinder for the corresponding Tenant. We also +// provide the ability to provide limit and offset to enable the consumer to provide accurate pagination. +// In addition the filters argument provides a mechanism for passing in valid filter strings to the list +// operation. Valid filter keys are: Name, Status, VolumeID, Limit, Marker (TenantID has no effect). +func (os *OpenStack) ListBackups(filters map[string]string) ([]backups.Backup, error) { + var allBackups []backups.Backup + + // Build the Opts + opts := backups.ListOpts{} + for key, val := range filters { + switch key { + case "Status": + opts.Status = val + case "Name": + opts.Name = val + case "VolumeID": + opts.VolumeID = val + case "Marker": + opts.Marker = val + case "Limit": + opts.Limit, _ = strconv.Atoi(val) + default: + klog.V(3).Infof("Not a valid filter key %s", key) + } + } + mc := metrics.NewMetricContext("backup", "list") + + allPages, err := backups.List(os.blockstorage, opts).AllPages() + if err != nil { + return nil, err + } + allBackups, err = backups.ExtractBackups(allPages) + if err != nil { + return nil, err + } + + if mc.ObserveRequest(err) != nil { + return nil, err + } + + return allBackups, nil +} + +// DeleteBackup issues a request to delete the Backup with the specified ID from the Cinder backend. +func (os *OpenStack) DeleteBackup(backupID string) error { + mc := metrics.NewMetricContext("backup", "delete") + err := backups.Delete(os.blockstorage, backupID).ExtractErr() + if mc.ObserveRequest(err) != nil { + klog.Errorf("Failed to delete backup: %v", err) + } + return err +} + +// GetBackupByID returns backup details by id. 
+func (os *OpenStack) GetBackupByID(backupID string) (*backups.Backup, error) { + mc := metrics.NewMetricContext("backup", "get") + backup, err := backups.Get(os.blockstorage, backupID).Extract() + if mc.ObserveRequest(err) != nil { + klog.Errorf("Failed to get backup: %v", err) + return nil, err + } + return backup, nil +} + +func (os *OpenStack) BackupsAreEnabled() (bool, error) { + // TODO: Check if the backup service is enabled + return true, nil +} + +// WaitBackupReady waits until backup is ready. It waits longer depending on +// the size of the corresponding snapshot. +func (os *OpenStack) WaitBackupReady(backupID string, snapshotSize int, backupMaxDurationSecondsPerGB int) (string, error) { + var err error + + duration := time.Duration(backupMaxDurationSecondsPerGB*snapshotSize + backupBaseDurationSeconds) + + err = os.waitBackupReadyWithContext(backupID, duration) + if err == context.DeadlineExceeded { + err = fmt.Errorf("timeout, Backup %s is still not Ready: %v", backupID, err) + } + + back, _ := os.GetBackupByID(backupID) + + if back != nil { + return back.Status, err + } else { + return "Failed to get backup status", err + } +} + +// Supporting function for WaitBackupReady(). +// Allows for a timeout while waiting for the backup to be ready. +func (os *OpenStack) waitBackupReadyWithContext(backupID string, duration time.Duration) error { + ctx, cancel := context.WithTimeout(context.Background(), duration*time.Second) + defer cancel() + var done bool + var err error + ticker := time.NewTicker(backupReadyCheckIntervalSeconds * time.Second) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + done, err = os.backupIsReady(backupID) + if err != nil { + return err + } + + if done { + return nil + } + case <-ctx.Done(): + return ctx.Err() + } + } + +} + +// Supporting function for waitBackupReadyWithContext(). +// Returns true when the backup is ready. +func (os *OpenStack) backupIsReady(backupID string) (bool, error) { + backup, err := os.GetBackupByID(backupID) + if err != nil { + return false, err + } + + if backup.Status == backupErrorStatus { + return false, errors.New("backup is in error state") + } + + return backup.Status == backupReadyStatus, nil +} diff --git a/pkg/csi/cinder/openstack/openstack_mock.go b/pkg/csi/cinder/openstack/openstack_mock.go index 5c52cf0fbf..481e0157fc 100644 --- a/pkg/csi/cinder/openstack/openstack_mock.go +++ b/pkg/csi/cinder/openstack/openstack_mock.go @@ -17,6 +17,7 @@ limitations under the License. 
package openstack import ( + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/backups" "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots" "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes" "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" @@ -41,6 +42,18 @@ var fakeSnapshot = snapshots.Snapshot{ Metadata: make(map[string]string), } +var fakemap = make(map[string]string) + +var fakeBackup = backups.Backup{ + ID: "eb5e4e9a-a4e5-4728-a748-04f9e2868573", + Name: "fake-snapshot", + Status: "available", + Size: 1, + VolumeID: "CSIVolumeID", + SnapshotID: "261a8b81-3660-43e5-bab8-6470b65ee4e8", + Metadata: &fakemap, +} + // revive:disable:exported // OpenStackMock is an autogenerated mock type for the IOpenStack type // ORIGINALLY GENERATED BY mockery with hand edits @@ -72,19 +85,19 @@ func (_m *OpenStackMock) AttachVolume(instanceID string, volumeID string) (strin } // CreateVolume provides a mock function with given fields: name, size, vtype, availability, tags -func (_m *OpenStackMock) CreateVolume(name string, size int, vtype string, availability string, snapshotID string, sourceVolID string, tags *map[string]string) (*volumes.Volume, error) { - ret := _m.Called(name, size, vtype, availability, snapshotID, sourceVolID, tags) +func (_m *OpenStackMock) CreateVolume(name string, size int, vtype string, availability string, snapshotID string, sourceVolID string, sourceBackupID string, tags map[string]string) (*volumes.Volume, error) { + ret := _m.Called(name, size, vtype, availability, snapshotID, sourceVolID, sourceBackupID, tags) var r0 *volumes.Volume - if rf, ok := ret.Get(0).(func(string, int, string, string, string, string, *map[string]string) *volumes.Volume); ok { - r0 = rf(name, size, vtype, availability, snapshotID, sourceVolID, tags) + if rf, ok := ret.Get(0).(func(string, int, string, string, string, string, string, map[string]string) *volumes.Volume); ok { + r0 = rf(name, size, vtype, availability, snapshotID, sourceVolID, sourceBackupID, tags) } else { r0 = ret.Get(0).(*volumes.Volume) } var r1 error - if rf, ok := ret.Get(1).(func(string, int, string, string, string, string, *map[string]string) error); ok { - r1 = rf(name, size, vtype, availability, snapshotID, sourceVolID, tags) + if rf, ok := ret.Get(1).(func(string, int, string, string, string, string, string, map[string]string) error); ok { + r1 = rf(name, size, vtype, availability, snapshotID, sourceVolID, sourceBackupID, tags) } else { r1 = ret.Error(1) } @@ -245,11 +258,11 @@ func (_m *OpenStackMock) ListSnapshots(filters map[string]string) ([]snapshots.S } // CreateSnapshot provides a mock function with given fields: name, volID, tags -func (_m *OpenStackMock) CreateSnapshot(name string, volID string, tags *map[string]string) (*snapshots.Snapshot, error) { +func (_m *OpenStackMock) CreateSnapshot(name string, volID string, tags map[string]string) (*snapshots.Snapshot, error) { ret := _m.Called(name, volID, tags) var r0 *snapshots.Snapshot - if rf, ok := ret.Get(0).(func(string, string, *map[string]string) *snapshots.Snapshot); ok { + if rf, ok := ret.Get(0).(func(string, string, map[string]string) *snapshots.Snapshot); ok { r0 = rf(name, volID, tags) } else { if ret.Get(0) != nil { @@ -258,7 +271,7 @@ func (_m *OpenStackMock) CreateSnapshot(name string, volID string, tags *map[str } var r1 error - if rf, ok := ret.Get(1).(func(string, string, *map[string]string) error); ok { + if rf, ok := ret.Get(1).(func(string, string, map[string]string) 
error); ok { r1 = rf(name, volID, tags) } else { r1 = ret.Error(1) @@ -281,6 +294,62 @@ func (_m *OpenStackMock) DeleteSnapshot(snapID string) error { return r0 } +func (_m *OpenStackMock) ListBackups(filters map[string]string) ([]backups.Backup, error) { + ret := _m.Called(filters) + + var r0 []backups.Backup + if rf, ok := ret.Get(0).(func(map[string]string) []backups.Backup); ok { + r0 = rf(filters) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]backups.Backup) + } + } + var r1 error + if rf, ok := ret.Get(1).(func(map[string]string) error); ok { + r1 = rf(filters) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +func (_m *OpenStackMock) CreateBackup(name, volID string, snapshotID string, tags map[string]string) (*backups.Backup, error) { + ret := _m.Called(name, volID, snapshotID, tags) + + var r0 *backups.Backup + if rf, ok := ret.Get(0).(func(string, string, string, map[string]string) *backups.Backup); ok { + r0 = rf(name, volID, snapshotID, tags) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*backups.Backup) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, string, string, map[string]string) error); ok { + r1 = rf(name, volID, snapshotID, tags) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +func (_m *OpenStackMock) DeleteBackup(backupID string) error { + ret := _m.Called(backupID) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(backupID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // ListVolumes provides a mock function without param func (_m *OpenStackMock) ListVolumes(limit int, marker string) ([]volumes.Volume, string, error) { ret := _m.Called(limit, marker) @@ -342,23 +411,59 @@ func (_m *OpenStackMock) GetSnapshotByID(snapshotID string) (*snapshots.Snapshot return &fakeSnapshot, nil } -func (_m *OpenStackMock) WaitSnapshotReady(snapshotID string) error { +func (_m *OpenStackMock) WaitSnapshotReady(snapshotID string) (string, error) { ret := _m.Called(snapshotID) - var r0 error - if rf, ok := ret.Get(0).(func(string) error); ok { + var r0 string + if rf, ok := ret.Get(0).(func(string) string); ok { r0 = rf(snapshotID) } else { - r0 = ret.Error(0) + r0 = ret.String(0) } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(snapshotID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +func (_m *OpenStackMock) GetBackupByID(backupID string) (*backups.Backup, error) { + + return &fakeBackup, nil +} + +func (_m *OpenStackMock) WaitBackupReady(backupID string, snapshotSize int, backupMaxDurationSecondsPerGB int) (string, error) { + ret := _m.Called(backupID) + + var r0 string + if rf, ok := ret.Get(0).(func(string) string); ok { + r0 = rf(backupID) + } else { + r0 = ret.String(0) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(backupID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } func (_m *OpenStackMock) GetMaxVolLimit() int64 { return 256 } +func (_m *OpenStackMock) BackupsAreEnabled() (bool, error) { + return true, nil +} + func (_m *OpenStackMock) GetInstanceByID(instanceID string) (*servers.Server, error) { return nil, nil } diff --git a/pkg/csi/cinder/openstack/openstack_snapshots.go b/pkg/csi/cinder/openstack/openstack_snapshots.go index 6337e5313c..bd2f94365f 100644 --- a/pkg/csi/cinder/openstack/openstack_snapshots.go +++ b/pkg/csi/cinder/openstack/openstack_snapshots.go @@ -39,23 +39,24 @@ const ( snapshotDescription = "Created by OpenStack Cinder CSI driver" SnapshotForceCreate = 
"force-create" + SnapshotType = "type" ) // CreateSnapshot issues a request to take a Snapshot of the specified Volume with the corresponding ID and // returns the resultant gophercloud Snapshot Item upon success -func (os *OpenStack) CreateSnapshot(name, volID string, tags *map[string]string) (*snapshots.Snapshot, error) { +func (os *OpenStack) CreateSnapshot(name, volID string, tags map[string]string) (*snapshots.Snapshot, error) { force := false // if no flag given, then force will be false by default // if flag it given , check it - if item, ok := (*tags)[SnapshotForceCreate]; ok { + if item, ok := (tags)[SnapshotForceCreate]; ok { var err error force, err = strconv.ParseBool(item) if err != nil { klog.V(5).Infof("Make force create flag to false due to: %v", err) } - delete(*tags, SnapshotForceCreate) + delete(tags, SnapshotForceCreate) } // Force the creation of snapshot even the Volume is in in-use state opts := &snapshots.CreateOpts{ @@ -65,7 +66,7 @@ func (os *OpenStack) CreateSnapshot(name, volID string, tags *map[string]string) Force: force, } if tags != nil { - opts.Metadata = *tags + opts.Metadata = tags } // TODO: Do some check before really call openstack API on the input mc := metrics.NewMetricContext("snapshot", "create") @@ -157,7 +158,7 @@ func (os *OpenStack) GetSnapshotByID(snapshotID string) (*snapshots.Snapshot, er } // WaitSnapshotReady waits till snapshot is ready -func (os *OpenStack) WaitSnapshotReady(snapshotID string) error { +func (os *OpenStack) WaitSnapshotReady(snapshotID string) (string, error) { backoff := wait.Backoff{ Duration: snapReadyDuration, Factor: snapReadyFactor, @@ -173,10 +174,16 @@ func (os *OpenStack) WaitSnapshotReady(snapshotID string) error { }) if wait.Interrupted(err) { - err = fmt.Errorf("timeout, Snapshot %s is still not Ready %v", snapshotID, err) + err = fmt.Errorf("timeout, Snapshot %s is still not Ready %v", snapshotID, err) } - return err + snap, _ := os.GetSnapshotByID(snapshotID) + + if snap != nil { + return snap.Status, err + } else { + return "Failed to get snapshot status", err + } } func (os *OpenStack) snapshotIsReady(snapshotID string) (bool, error) { diff --git a/pkg/csi/cinder/openstack/openstack_volumes.go b/pkg/csi/cinder/openstack/openstack_volumes.go index 9ef45b455a..96550975d3 100644 --- a/pkg/csi/cinder/openstack/openstack_volumes.go +++ b/pkg/csi/cinder/openstack/openstack_volumes.go @@ -51,7 +51,7 @@ const ( var volumeErrorStates = [...]string{"error", "error_extending", "error_deleting"} // CreateVolume creates a volume of given size -func (os *OpenStack) CreateVolume(name string, size int, vtype, availability string, snapshotID string, sourcevolID string, tags *map[string]string) (*volumes.Volume, error) { +func (os *OpenStack) CreateVolume(name string, size int, vtype, availability string, snapshotID string, sourceVolID string, sourceBackupID string, tags map[string]string) (*volumes.Volume, error) { opts := &volumes.CreateOpts{ Name: name, @@ -60,14 +60,26 @@ func (os *OpenStack) CreateVolume(name string, size int, vtype, availability str AvailabilityZone: availability, Description: volumeDescription, SnapshotID: snapshotID, - SourceVolID: sourcevolID, + SourceVolID: sourceVolID, + BackupID: sourceBackupID, } if tags != nil { - opts.Metadata = *tags + opts.Metadata = tags + } + + blockstorageClient, err := openstack.NewBlockStorageV3(os.blockstorage.ProviderClient, os.epOpts) + if err != nil { + return nil, err + } + + // creating volumes from backups is available since 3.47 microversion + // 
https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id47 + if !os.bsOpts.IgnoreVolumeMicroversion && sourceBackupID != "" { + blockstorageClient.Microversion = "3.47" } mc := metrics.NewMetricContext("volume", "create") - vol, err := volumes.Create(os.blockstorage, opts).Extract() + vol, err := volumes.Create(blockstorageClient, opts).Extract() if mc.ObserveRequest(err) != nil { return nil, err } diff --git a/tests/sanity/cinder/fakecloud.go b/tests/sanity/cinder/fakecloud.go index d1bbed949f..f04f21fc67 100644 --- a/tests/sanity/cinder/fakecloud.go +++ b/tests/sanity/cinder/fakecloud.go @@ -7,6 +7,7 @@ import ( "time" "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/backups" "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots" "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes" "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" @@ -19,6 +20,7 @@ type cloud struct { volumes map[string]*volumes.Volume snapshots map[string]*snapshots.Snapshot instances map[string]*servers.Server + backups map[string]*backups.Backup } func getfakecloud() *cloud { @@ -32,7 +34,7 @@ func getfakecloud() *cloud { var _ openstack.IOpenStack = &cloud{} // Fake Cloud -func (cloud *cloud) CreateVolume(name string, size int, vtype, availability string, snapshotID string, sourceVolID string, tags *map[string]string) (*volumes.Volume, error) { +func (cloud *cloud) CreateVolume(name string, size int, vtype, availability string, snapshotID string, sourceVolID string, sourceBackupID string, tags map[string]string) (*volumes.Volume, error) { vol := &volumes.Volume{ ID: randString(10), @@ -43,6 +45,7 @@ func (cloud *cloud) CreateVolume(name string, size int, vtype, availability stri AvailabilityZone: availability, SnapshotID: snapshotID, SourceVolID: sourceVolID, + BackupID: &sourceBackupID, } cloud.volumes[vol.ID] = vol @@ -149,7 +152,7 @@ func invalidError() error { return gophercloud.ErrDefault400{} } -func (cloud *cloud) CreateSnapshot(name, volID string, tags *map[string]string) (*snapshots.Snapshot, error) { +func (cloud *cloud) CreateSnapshot(name, volID string, tags map[string]string) (*snapshots.Snapshot, error) { snap := &snapshots.Snapshot{ ID: randString(10), @@ -220,10 +223,84 @@ func (cloud *cloud) GetSnapshotByID(snapshotID string) (*snapshots.Snapshot, err return snap, nil } -func (cloud *cloud) WaitSnapshotReady(snapshotID string) error { +func (cloud *cloud) WaitSnapshotReady(snapshotID string) (string, error) { + return "available", nil +} + +func (cloud *cloud) CreateBackup(name, volID string, snapshotID string, tags map[string]string) (*backups.Backup, error) { + + backup := &backups.Backup{ + ID: randString(10), + Name: name, + Status: "available", + VolumeID: volID, + SnapshotID: snapshotID, + CreatedAt: time.Now(), + } + + cloud.backups[backup.ID] = backup + return backup, nil +} + +func (cloud *cloud) ListBackups(filters map[string]string) ([]backups.Backup, error) { + var backuplist []backups.Backup + startingToken := filters["Marker"] + limitfilter := filters["Limit"] + limit, _ := strconv.Atoi(limitfilter) + name := filters["Name"] + volumeID := filters["VolumeID"] + + for _, value := range cloud.backups { + if volumeID != "" { + if value.VolumeID == volumeID { + backuplist = append(backuplist, *value) + break + } + } else if name != "" { + if value.Name == name { + backuplist = append(backuplist, *value) + break + } + } else { + backuplist = 
append(backuplist, *value) + } + } + + if startingToken != "" { + t, _ := strconv.Atoi(startingToken) + backuplist = backuplist[t:] + } + if limit != 0 { + backuplist = backuplist[:limit] + } + + return backuplist, nil +} + +func (cloud *cloud) DeleteBackup(backupID string) error { + delete(cloud.backups, backupID) + return nil } +func (cloud *cloud) GetBackupByID(backupID string) (*backups.Backup, error) { + backup, ok := cloud.backups[backupID] + + if !ok { + return nil, notFoundError() + } + + return backup, nil +} + +func (cloud *cloud) BackupsAreEnabled() (bool, error) { + return true, nil +} + +func (cloud *cloud) WaitBackupReady(backupID string, snapshotSize int, backupMaxDurationSecondsPerGB int) (string, error) { + return "", nil +} + func randString(n int) string { const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" b := make([]byte, n) From d9106b315c509738ed3728a8d248154650c1b7cf Mon Sep 17 00:00:00 2001 From: Chris Werner Rau Date: Tue, 20 Feb 2024 21:19:36 +0100 Subject: [PATCH 07/23] fix: add missing labels to resources (#2519) * fix: add missing labels to resources otherwise upgrades fail with `resource already exists, add missing labels` * chore: bump version --- .../openstack-cloud-controller-manager/Chart.yaml | 8 ++++++-- .../templates/_helpers.tpl | 13 ------------- .../templates/clusterrole.yaml | 1 + .../templates/clusterrolebinding-sm.yaml | 1 + .../templates/clusterrolebinding.yaml | 1 + .../templates/daemonset.yaml | 3 +-- .../templates/secret.yaml | 1 + .../templates/service-sm.yaml | 3 +-- .../templates/serviceaccount.yaml | 1 + .../templates/servicemonitor.yaml | 3 +-- 10 files changed, 14 insertions(+), 21 deletions(-) diff --git a/charts/openstack-cloud-controller-manager/Chart.yaml b/charts/openstack-cloud-controller-manager/Chart.yaml index ff39ef84e9..d7065b9f55 100644 --- a/charts/openstack-cloud-controller-manager/Chart.yaml +++ b/charts/openstack-cloud-controller-manager/Chart.yaml @@ -1,11 +1,15 @@ -apiVersion: v1 +apiVersion: v2 appVersion: v1.29.0 description: Openstack Cloud Controller Manager Helm Chart icon: https://object-storage-ca-ymq-1.vexxhost.net/swift/v1/6e4619c416ff4bd19e1c087f27a43eea/www-images-prod/openstack-logo/OpenStack-Logo-Vertical.png home: https://github.com/kubernetes/cloud-provider-openstack name: openstack-cloud-controller-manager -version: 2.29.0 +version: 2.29.1 maintainers: - name: eumel8 email: f.kloeker@telekom.de url: https://www.telekom.com +dependencies: + - name: common + version: 2.14.1 + repository: https://charts.bitnami.com/bitnami diff --git a/charts/openstack-cloud-controller-manager/templates/_helpers.tpl b/charts/openstack-cloud-controller-manager/templates/_helpers.tpl index 098dc0b378..411e157324 100644 --- a/charts/openstack-cloud-controller-manager/templates/_helpers.tpl +++ b/charts/openstack-cloud-controller-manager/templates/_helpers.tpl @@ -12,19 +12,6 @@ Create chart name and version as used by the chart label. {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} -{{/* -Common labels and app labels -*/}} -{{- define "occm.labels" -}} -app.kubernetes.io/name: {{ include "occm.name" . }} -helm.sh/chart: {{ include "occm.chart" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} - {{- define "occm.common.matchLabels" -}} app: {{ template "occm.name" . 
}} release: {{ .Release.Name }} diff --git a/charts/openstack-cloud-controller-manager/templates/clusterrole.yaml b/charts/openstack-cloud-controller-manager/templates/clusterrole.yaml index 6786931f41..cf03f8a11a 100644 --- a/charts/openstack-cloud-controller-manager/templates/clusterrole.yaml +++ b/charts/openstack-cloud-controller-manager/templates/clusterrole.yaml @@ -2,6 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ .Values.clusterRoleName }} + labels: {{- include "common.labels.standard" . | nindent 4 }} annotations: {{- with .Values.commonAnnotations }} {{- toYaml . | nindent 4 }} diff --git a/charts/openstack-cloud-controller-manager/templates/clusterrolebinding-sm.yaml b/charts/openstack-cloud-controller-manager/templates/clusterrolebinding-sm.yaml index 88d9aedf6e..f2ee6ac5f1 100644 --- a/charts/openstack-cloud-controller-manager/templates/clusterrolebinding-sm.yaml +++ b/charts/openstack-cloud-controller-manager/templates/clusterrolebinding-sm.yaml @@ -3,6 +3,7 @@ kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: system:{{ include "occm.name" . }}:auth-delegate + labels: {{- include "common.labels.standard" . | nindent 4 }} annotations: {{- with .Values.commonAnnotations }} {{- toYaml . | nindent 4 }} diff --git a/charts/openstack-cloud-controller-manager/templates/clusterrolebinding.yaml b/charts/openstack-cloud-controller-manager/templates/clusterrolebinding.yaml index a572710908..62906bfbca 100644 --- a/charts/openstack-cloud-controller-manager/templates/clusterrolebinding.yaml +++ b/charts/openstack-cloud-controller-manager/templates/clusterrolebinding.yaml @@ -3,6 +3,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ .Values.clusterRoleName }} + labels: {{- include "common.labels.standard" . | nindent 4 }} annotations: {{- with .Values.commonAnnotations }} {{- toYaml . | nindent 4 }} diff --git a/charts/openstack-cloud-controller-manager/templates/daemonset.yaml b/charts/openstack-cloud-controller-manager/templates/daemonset.yaml index 1808dee94f..1d1b74d66c 100644 --- a/charts/openstack-cloud-controller-manager/templates/daemonset.yaml +++ b/charts/openstack-cloud-controller-manager/templates/daemonset.yaml @@ -2,9 +2,8 @@ apiVersion: apps/v1 kind: DaemonSet metadata: name: {{ include "occm.name" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} namespace: {{ .Release.Namespace }} - labels: - {{- include "occm.labels" . | nindent 4 }} annotations: {{- with .Values.commonAnnotations }} {{- toYaml . | nindent 4 }} diff --git a/charts/openstack-cloud-controller-manager/templates/secret.yaml b/charts/openstack-cloud-controller-manager/templates/secret.yaml index 66c6352ea3..b745fa25f6 100644 --- a/charts/openstack-cloud-controller-manager/templates/secret.yaml +++ b/charts/openstack-cloud-controller-manager/templates/secret.yaml @@ -3,6 +3,7 @@ apiVersion: v1 kind: Secret metadata: name: {{ .Values.secret.name | default "cloud-config" }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} namespace: {{ .Release.Namespace }} annotations: {{- with .Values.commonAnnotations }} diff --git a/charts/openstack-cloud-controller-manager/templates/service-sm.yaml b/charts/openstack-cloud-controller-manager/templates/service-sm.yaml index 30ae3fe9de..92b07f865e 100644 --- a/charts/openstack-cloud-controller-manager/templates/service-sm.yaml +++ b/charts/openstack-cloud-controller-manager/templates/service-sm.yaml @@ -2,9 +2,8 @@ apiVersion: v1 kind: Service metadata: - labels: - {{- include "occm.labels" . | nindent 4 }} name: {{ include "occm.name" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} namespace: {{ .Release.Namespace }} annotations: {{- with .Values.commonAnnotations }} diff --git a/charts/openstack-cloud-controller-manager/templates/serviceaccount.yaml b/charts/openstack-cloud-controller-manager/templates/serviceaccount.yaml index f97f1c8a65..dfac2b15fa 100644 --- a/charts/openstack-cloud-controller-manager/templates/serviceaccount.yaml +++ b/charts/openstack-cloud-controller-manager/templates/serviceaccount.yaml @@ -2,6 +2,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: {{ .Values.serviceAccountName }} + labels: {{- include "common.labels.standard" . | nindent 4 }} namespace: {{ .Release.Namespace }} annotations: {{- with .Values.commonAnnotations }} diff --git a/charts/openstack-cloud-controller-manager/templates/servicemonitor.yaml b/charts/openstack-cloud-controller-manager/templates/servicemonitor.yaml index 2e728add50..1663fb3b56 100644 --- a/charts/openstack-cloud-controller-manager/templates/servicemonitor.yaml +++ b/charts/openstack-cloud-controller-manager/templates/servicemonitor.yaml @@ -2,9 +2,8 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: - labels: - {{- include "occm.labels" . | nindent 4 }} name: {{ include "occm.name" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }}
   namespace: {{ .Release.Namespace }}
   annotations:
     {{- with .Values.commonAnnotations }}

From 89d264f711db17e7e492512ae3b12d2f75f20aa3 Mon Sep 17 00:00:00 2001
From: Cyril Connan
Date: Thu, 29 Feb 2024 13:17:02 +0100
Subject: [PATCH 08/23] [octavia-ingress-controller] Add annotations to keep
 floating IP and/or specify an existing floating IP (#2166)

* Add annotation to keep the floating IP

* Add annotation to specify the floating IP to use on the LB when creating an ingress

* Add docs for the octavia.ingress.kubernetes.io/keep-floatingip & octavia.ingress.kubernetes.io/floatingip annotations

* Remove debug logs

* Change annotation syntax; don't create a new FIP if the user requested a particular one, add an additional check that the FIP is already bound to the correct port, and add the ability to update the FIP of an existing ingress by updating the annotation

* Add missing else

* Log format

* Create functions to attach/detach FIPs to a port

* Fix a bug where the LB was created in private mode when no FIP was provided in the annotation, and improve the OpenStack Neutron FIP logic
---
 .../using-octavia-ingress-controller.md     | 48 ++++++++++
 pkg/ingress/controller/controller.go        | 52 ++++++++---
 pkg/ingress/controller/openstack/neutron.go | 89 +++++++++++++++++--
 3 files changed, 169 insertions(+), 20 deletions(-)

diff --git a/docs/octavia-ingress-controller/using-octavia-ingress-controller.md b/docs/octavia-ingress-controller/using-octavia-ingress-controller.md
index d5ab055910..dbad15ebd5 100644
--- a/docs/octavia-ingress-controller/using-octavia-ingress-controller.md
+++ b/docs/octavia-ingress-controller/using-octavia-ingress-controller.md
@@ -15,6 +15,7 @@
     - [Create an Ingress resource](#create-an-ingress-resource)
     - [Enable TLS encryption](#enable-tls-encryption)
   - [Allow CIDRs](#allow-cidrs)
+  - [Creating Ingress by specifying a floating IP](#creating-ingress-by-specifying-a-floating-ip)
 

 
@@ -504,3 +505,50 @@ spec:
             port:
               number: 8080
 ```
+
+## Creating Ingress by specifying a floating IP
+
+Sometimes it's useful to use an existing available floating IP rather than creating a new one, especially in automation scenarios. In the example below, 122.112.219.229 is an available floating IP created in the OpenStack Networking service.
+
+You can also specify that the floating IP should not be deleted when the ingress is deleted. By default, if not specified, the floating IP
+is deleted together with the load balancer when the ingress is removed from Kubernetes.
+
+Create a new deployment:
+```shell script
+kubectl create deployment test-web --replicas 3 --image nginx --port 80
+```
+
+Create a Service of type NodePort:
+```shell script
+kubectl expose deployment test-web --type NodePort
+```
+
+Create an ingress using a specific floating IP:
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: test-web-ingress
+  annotations:
+    kubernetes.io/ingress.class: "openstack"
+    octavia.ingress.kubernetes.io/internal: "false"
+    octavia.ingress.kubernetes.io/keep-floatingip: "true" # the floating IP will not be deleted when the ingress is deleted
+    octavia.ingress.kubernetes.io/floatingip: "122.112.219.229" # define the floating IP to use
+spec:
+  rules:
+  - host: test-web.foo.bar.com
+    http:
+      paths:
+      - path: /
+        pathType: Prefix
+        backend:
+          service:
+            name: test-web
+            port:
+              number: 80
+```
+
+If the floating IP is available, you can test it with:
+```shell script
+curl -H "host: test-web.foo.bar.com" http://122.112.219.229
+```
diff --git a/pkg/ingress/controller/controller.go b/pkg/ingress/controller/controller.go
index c81cf28ca5..249813098c 100644
--- a/pkg/ingress/controller/controller.go
+++ b/pkg/ingress/controller/controller.go
@@ -101,6 +101,16 @@ const (
 	// Default to true.
 	IngressAnnotationInternal = "octavia.ingress.kubernetes.io/internal"
 
+	// IngressAnnotationLoadBalancerKeepFloatingIP is the annotation used on the Ingress
+	// to indicate that we want to keep the floatingIP after the ingress deletion. The Octavia LoadBalancer will be deleted,
+	// but not the floatingIP. That means the floatingIP can be reused on another ingress without editing the DNS record or updating the whitelist.
+	// Default to false.
+	IngressAnnotationLoadBalancerKeepFloatingIP = "octavia.ingress.kubernetes.io/keep-floatingip"
+
+	// IngressAnnotationFloatingIP is the key of the annotation on an ingress to set the floating IP that will be associated with the LoadBalancer.
+	// If the floatingIP is not available, an error will be returned.
+	IngressAnnotationFloatingIP = "octavia.ingress.kubernetes.io/floatingip"
+
 	// IngressAnnotationSourceRangesKey is the key of the annotation on an ingress to set allowed IP ranges on their LoadBalancers.
 	// It should be a comma-separated list of CIDRs.
 	IngressAnnotationSourceRangesKey = "octavia.ingress.kubernetes.io/whitelist-source-range"
@@ -589,15 +599,24 @@ func (c *Controller) deleteIngress(ing *nwv1.Ingress) error {
 		return nil
 	}
 
-	// Delete the floating IP for the load balancer VIP. We don't check if the Ingress is internal or not, just delete
-	// any floating IPs associated with the load balancer VIP port.
-	logger.Debug("deleting floating IP")
-
-	if _, err = c.osClient.EnsureFloatingIP(true, loadbalancer.VipPortID, "", ""); err != nil {
-		return fmt.Errorf("failed to delete floating IP: %v", err)
+	// Manage the floatingIP
+	keepFloatingSetting := getStringFromIngressAnnotation(ing, IngressAnnotationLoadBalancerKeepFloatingIP, "false")
+	keepFloating, err := strconv.ParseBool(keepFloatingSetting)
+	if err != nil {
+		return fmt.Errorf("unknown annotation %s: %v", IngressAnnotationLoadBalancerKeepFloatingIP, err)
 	}
 
-	logger.WithFields(log.Fields{"lbID": loadbalancer.ID}).Info("VIP or floating IP deleted")
+	if !keepFloating {
+		// Delete the floating IP for the load balancer VIP. We don't check if the Ingress is internal or not, just delete
+		// any floating IPs associated with the load balancer VIP port.
+		logger.WithFields(log.Fields{"lbID": loadbalancer.ID, "VIP": loadbalancer.VipAddress}).Info("deleting floating IPs associated with the load balancer VIP port")
+
+		if _, err = c.osClient.EnsureFloatingIP(true, loadbalancer.VipPortID, "", "", ""); err != nil {
+			return fmt.Errorf("failed to delete floating IP: %v", err)
+		}
+
+		logger.WithFields(log.Fields{"lbID": loadbalancer.ID}).Info("VIP or floating IP deleted")
+	}
 
 	// Delete security group managed for the Ingress backend service
 	if c.config.Octavia.ManageSecurityGroups {
@@ -934,15 +953,24 @@ func (c *Controller) ensureIngress(ing *nwv1.Ingress) error {
 	address := lb.VipAddress
 	// Allocate floating ip for loadbalancer vip if the external network is configured and the Ingress is not internal.
 	if !isInternal && c.config.Octavia.FloatingIPNetwork != "" {
-		logger.Info("creating floating IP")
-
-		description := fmt.Sprintf("Floating IP for Kubernetes ingress %s in namespace %s from cluster %s", ingName, ingNamespace, clusterName)
-		address, err = c.osClient.EnsureFloatingIP(false, lb.VipPortID, c.config.Octavia.FloatingIPNetwork, description)
+		floatingIPSetting := getStringFromIngressAnnotation(ing, IngressAnnotationFloatingIP, "")
 		if err != nil {
-			return fmt.Errorf("failed to create floating IP: %v", err)
+			return fmt.Errorf("unknown annotation %s: %v", IngressAnnotationFloatingIP, err)
 		}
-		logger.WithFields(log.Fields{"fip": address}).Info("floating IP created")
+
+		description := fmt.Sprintf("Floating IP for Kubernetes ingress %s in namespace %s from cluster %s", ingName, ingNamespace, clusterName)
+
+		if floatingIPSetting != "" {
+			logger.Info("trying to use floating IP: ", floatingIPSetting)
+		} else {
+			logger.Info("creating new floating IP")
+		}
+		address, err = c.osClient.EnsureFloatingIP(false, lb.VipPortID, floatingIPSetting, c.config.Octavia.FloatingIPNetwork, description)
+		if err != nil {
+			return fmt.Errorf("failed to use provided floating IP %s: %v", floatingIPSetting, err)
+		}
+		logger.Info("floating IP ", address, " configured")
 	}
 
 	// Update ingress status
diff --git a/pkg/ingress/controller/openstack/neutron.go b/pkg/ingress/controller/openstack/neutron.go
index 7e68cedd52..0a3c0a0684 100644
--- a/pkg/ingress/controller/openstack/neutron.go
+++ b/pkg/ingress/controller/openstack/neutron.go
@@ -47,6 +47,33 @@ func (os *OpenStack) getFloatingIPs(listOpts floatingips.ListOpts) ([]floatingip
 	return allFIPs, nil
 }
 
+func (os *OpenStack) createFloatingIP(portID string, floatingNetworkID string, description string) (*floatingips.FloatingIP, error) {
+	floatIPOpts := floatingips.CreateOpts{
+		PortID:            portID,
+		FloatingNetworkID: floatingNetworkID,
+		Description:       description,
+	}
+	return floatingips.Create(os.neutron, floatIPOpts).Extract()
+}
+
+// associateFloatingIP associates an unused floating IP with a given port
+func (os *OpenStack) associateFloatingIP(fip *floatingips.FloatingIP, portID string, description string) (*floatingips.FloatingIP, error) {
+	updateOpts := floatingips.UpdateOpts{
+		PortID:      &portID,
+		Description: &description,
+	}
+	return floatingips.Update(os.neutron, fip.ID, updateOpts).Extract()
+}
+
+// disassociateFloatingIP disassociates a floating IP from a port
+func (os *OpenStack) disassociateFloatingIP(fip *floatingips.FloatingIP, description string) (*floatingips.FloatingIP, error) {
+	updateDisassociateOpts := floatingips.UpdateOpts{
+		PortID:      new(string),
+		Description: &description,
+	}
+	return floatingips.Update(os.neutron, fip.ID, updateDisassociateOpts).Extract()
+}
+
 // GetSubnet get a subnet by the
func (os *OpenStack) GetSubnet(subnetID string) (*subnets.Subnet, error) {
 subnet, err := subnets.Get(os.neutron, subnetID).Extract()
@@ -71,7 +98,7 @@ func (os *OpenStack) getPorts(listOpts ports.ListOpts) ([]ports.Port, error) {
 }
 // EnsureFloatingIP makes sure a floating IP is allocated for the port
-func (os *OpenStack) EnsureFloatingIP(needDelete bool, portID string, floatingIPNetwork string, description string) (string, error) {
+func (os *OpenStack) EnsureFloatingIP(needDelete bool, portID string, existingfloatingIP string, floatingIPNetwork string, description string) (string, error) {
 listOpts := floatingips.ListOpts{PortID: portID}
 fips, err := os.getFloatingIPs(listOpts)
 if err != nil {
@@ -94,18 +121,64 @@
 }
 var fip *floatingips.FloatingIP
- if len(fips) == 0 {
- floatIPOpts := floatingips.CreateOpts{
- PortID: portID,
+
+ if existingfloatingIP == "" {
+ if len(fips) == 1 {
+ fip = &fips[0]
+ } else {
+ fip, err = os.createFloatingIP(portID, floatingIPNetwork, description)
+ if err != nil {
+ return "", err
+ }
+ }
+ } else {
+ // the user provided a FIP, so
+ // check that the provided FIP is available
+ opts := floatingips.ListOpts{
+ FloatingIP: existingfloatingIP,
 FloatingNetworkID: floatingIPNetwork,
- Description: description,
 }
- fip, err = floatingips.Create(os.neutron, floatIPOpts).Extract()
+ osFips, err := os.getFloatingIPs(opts)
 if err != nil {
 return "", err
 }
- } else {
- fip = &fips[0]
+ if len(osFips) != 1 {
+ return "", fmt.Errorf("error when searching for floating IP %s, %d floating IPs found", existingfloatingIP, len(osFips))
+ }
+ // check if the FIP is already attached to the correct port
+ if osFips[0].PortID == portID {
+ return osFips[0].FloatingIP, nil
+ }
+ // check if the FIP is already used by another port
+ // We might consider detaching that FIP here instead of returning an error
+ if osFips[0].PortID != "" {
+ return "", fmt.Errorf("floating IP %s already used by port %s", osFips[0].FloatingIP, osFips[0].PortID)
+ }
+
+ // if the port doesn't have a FIP yet
+ if len(fips) == 0 {
+ fip, err = os.associateFloatingIP(&osFips[0], portID, description)
+ if err != nil {
+ return "", err
+ }
+ } else if osFips[0].FloatingIP != fips[0].FloatingIP {
+ // disassociate the old FIP first: if we update the FIP without disassociating it,
+ // OpenStack returns an HTTP 409 error:
+ // "Cannot associate floating IP with port using fixed
+ // IP, as that fixed IP already has a floating IP on
+ // external network"
+ _, err = os.disassociateFloatingIP(&fips[0], "")
+ if err != nil {
+ return "", err
+ }
+ // associate the new FIP
+ fip, err = os.associateFloatingIP(&osFips[0], portID, description)
+ if err != nil {
+ return "", err
+ }
+ } else {
+ fip = &fips[0]
+ }
 }
 return fip.FloatingIP, nil
From 7eeef2f280542acf539a62f289311c4602ae9c45 Mon Sep 17 00:00:00 2001 From: Iacopo Colonnelli Date: Tue, 5 Mar 2024 18:06:35 +0100 Subject: [PATCH 09/23] Pass KeyId to EncryptResponse (#2535)
--- pkg/kms/server/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/kms/server/server.go b/pkg/kms/server/server.go index f67ef0b4d6..363cfc3e5f 100644 --- a/pkg/kms/server/server.go +++ b/pkg/kms/server/server.go
@@ -148,5 +148,5 @@ func (s *KMSserver) Encrypt(ctx context.Context, req *pb.EncryptRequest) (*pb.En
 klog.V(4).Infof("Failed to encrypt data: %v", err)
 return nil, err
 }
- return &pb.EncryptResponse{Ciphertext: cipher}, nil
+ return &pb.EncryptResponse{Ciphertext: cipher, KeyId: s.cfg.KeyManager.KeyID}, nil
 }
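In the KMS v2 contract the kube-apiserver compares the `key_id` returned by `Status` and `Encrypt` to detect key rotation, and it treats an empty value as an invalid response, so an `EncryptResponse` without `KeyId` breaks the plugin under the v2 API. The following minimal sketch (illustrative only, not part of the patch; it assumes a client generated from the upstream v2 KMS proto, as published in `k8s.io/kms/apis/v2`) shows the property the fix restores:

```go
package kmscheck

import (
	"context"
	"fmt"

	pb "k8s.io/kms/apis/v2" // assumed import path for the upstream v2 KMS API types
)

// verifyKeyID probes a v2 KMS plugin: kube-apiserver rejects EncryptResponses
// whose KeyId is empty, and uses the value for key-rotation detection against
// StatusResponse.KeyId.
func verifyKeyID(ctx context.Context, c pb.KeyManagementServiceClient) error {
	resp, err := c.Encrypt(ctx, &pb.EncryptRequest{Uid: "probe", Plaintext: []byte("secret")})
	if err != nil {
		return fmt.Errorf("encrypt failed: %w", err)
	}
	if resp.KeyId == "" {
		return fmt.Errorf("v2 KMS plugin returned an empty KeyId")
	}
	return nil
}
```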
From c63fa7bf44465d513c7314dc4bfc365d0631ff1d Mon Sep 17 00:00:00 2001 From: Michal Dulko Date: Fri, 15 Mar 2024 10:08:51 +0100 Subject: [PATCH 10/23] Improve CCM docs in dual-stack context (#2563)
`subnet-id` is cumbersome to use in dual-stack environments; this commit documents that.
--- .../using-openstack-cloud-controller-manager.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/docs/openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md b/docs/openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md index 996b86ce68..06bc54bb17 100644 --- a/docs/openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md +++ b/docs/openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md
@@ -217,13 +217,13 @@ Although the openstack-cloud-controller-manager was initially implemented with N
 Optional. If specified, only "v2" is supported.
 * `subnet-id`
- ID of the Neutron subnet on which to create load balancer VIP. This ID is also used to create pool members, if `member-subnet-id` is not set.
+ ID of the Neutron subnet on which to create load balancer VIP. This ID is also used to create pool members, if `member-subnet-id` is not set. For dual-stack deployments it's recommended not to set this option and to let cloud-provider-openstack autodetect which subnet to use for which load balancer.
 * `member-subnet-id`
 ID of the Neutron subnet on which to create the members of the load balancer. The load balancer gets another network port on this subnet. Defaults to `subnet-id` if not set.
 * `network-id`
- ID of the Neutron network on which to create load balancer VIP, not needed if `subnet-id` is set.
+ ID of the Neutron network on which to create load balancer VIP, not needed if `subnet-id` is set. If not set, the network will be autodetected based on the network used by the cluster nodes.
 * `manage-security-groups`
 If the Neutron security groups should be managed separately. Default: false
From b75efdb1c80d15e3ff8924fc149dfd91eb937d97 Mon Sep 17 00:00:00 2001 From: Michal Dulko Date: Fri, 15 Mar 2024 10:24:00 +0100 Subject: [PATCH 11/23] Update KMS API to v2 (#2561)
KMS v1 API is deprecated and in v1.29 core K8s won't allow it with default feature gates set. This commit makes sure we're proposing configuration of the v2 API in the example EncryptionConfigs.
--- docs/barbican-kms-plugin/using-barbican-kms-plugin.md | 6 +++--- manifests/barbican-kms/encryption-config.yaml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/docs/barbican-kms-plugin/using-barbican-kms-plugin.md b/docs/barbican-kms-plugin/using-barbican-kms-plugin.md index c3a178fccd..6323fc0fb8 100644 --- a/docs/barbican-kms-plugin/using-barbican-kms-plugin.md +++ b/docs/barbican-kms-plugin/using-barbican-kms-plugin.md
@@ -84,7 +84,7 @@ plane nodes.
 kubectl apply -f https://raw.githubusercontent.com/kubernetes/cloud-provider-openstack/master/manifests/barbican-kms/ds.yaml
 ```
 *recommendation:* Use the tag corresponding to your Kubernetes release, for
-example `release-1.25` for kubernetes version 1.25.
+example `release-1.29` for kubernetes version 1.29.
### Create encryption configuration
@@ -99,9 +99,9 @@
 resources:
 - secrets
 providers:
 - kms:
- name : barbican
+ apiVersion: v2
+ name: barbican
 endpoint: unix:///var/lib/kms/kms.sock
- cachesize: 100
- identity: {}
 ```
diff --git a/manifests/barbican-kms/encryption-config.yaml b/manifests/barbican-kms/encryption-config.yaml index 6ffeb5b3da..37672a4e47 100755 --- a/manifests/barbican-kms/encryption-config.yaml +++ b/manifests/barbican-kms/encryption-config.yaml
@@ -5,7 +5,7 @@
 resources:
 - secrets
 providers:
 - kms:
+ apiVersion: v2
 name: barbican
 endpoint: unix:///var/lib/kms/kms.sock
- cachesize: 20
- identity: {}
From d28a24baeb233e10ec06c3396606138479956fd2 Mon Sep 17 00:00:00 2001 From: Maysa De Macedo Souza Date: Tue, 19 Mar 2024 09:04:17 -0300 Subject: [PATCH 12/23] Remove enforcement of IPv6 LB as internal (#2557)
In OpenStack, IPv6 deployments that use GUAs don't require NAT to access the outside world, so IPv6 addresses can be reachable without Floating IPs, which makes CPO's enforcement of IPv6 LBs as internal unnecessary. This commit removes that enforcement, which results in IPv6 load-balancers being allowed to be shared between Services. Also, it's now possible to make the load-balancer use the IPv6 stateful address defined in the loadBalancerIP of the Service.
--- pkg/openstack/events.go | 1 + pkg/openstack/loadbalancer.go | 24 +++++++++++++++--------- 2 files changed, 16 insertions(+), 9 deletions(-)
diff --git a/pkg/openstack/events.go b/pkg/openstack/events.go index 200b613d4a..4a0a073f78 100644 --- a/pkg/openstack/events.go +++ b/pkg/openstack/events.go
@@ -21,4 +21,5 @@ const (
 eventLBExternalNetworkSearchFailed = "LoadBalancerExternalNetworkSearchFailed"
 eventLBSourceRangesIgnored = "LoadBalancerSourceRangesIgnored"
 eventLBAZIgnored = "LoadBalancerAvailabilityZonesIgnored"
+ eventLBFloatingIPSkipped = "LoadBalancerFloatingIPSkipped"
 )
diff --git a/pkg/openstack/loadbalancer.go b/pkg/openstack/loadbalancer.go index ca411fca62..e5f496830f 100644 --- a/pkg/openstack/loadbalancer.go +++ b/pkg/openstack/loadbalancer.go
@@ -259,8 +259,10 @@ func (lbaas *LbaasV2) createOctaviaLoadBalancer(name, clusterName string, servic
 // For external load balancer, the LoadBalancerIP is a public IP address.
 loadBalancerIP := service.Spec.LoadBalancerIP
- if loadBalancerIP != "" && svcConf.internal {
- createOpts.VipAddress = loadBalancerIP
+ if loadBalancerIP != "" {
+ if svcConf.internal || (svcConf.preferredIPFamily == corev1.IPv6Protocol) {
+ createOpts.VipAddress = loadBalancerIP
+ }
 }
 if !lbaas.opts.ProviderRequiresSerialAPICalls {
@@ -1315,9 +1317,6 @@ func (lbaas *LbaasV2) checkService(service *corev1.Service, nodes []*corev1.Node
 klog.V(3).InfoS("Enforcing internal LB", "annotation", true, "config", false)
 }
 svcConf.internal = true
- } else if svcConf.preferredIPFamily == corev1.IPv6Protocol {
- // floating IPs are not supported in IPv6 networks
- svcConf.internal = true
 } else {
 svcConf.internal = getBoolFromServiceAnnotation(service, ServiceAnnotationLoadBalancerInternal, lbaas.opts.InternalLB)
 }
@@ -1711,11 +1710,18 @@ func (lbaas *LbaasV2) ensureOctaviaLoadBalancer(ctx context.Context, clusterName
 }
 }
- addr, err := lbaas.ensureFloatingIP(clusterName, service, loadbalancer, svcConf, isLBOwner)
- if err != nil {
- return nil, err
+ addr := loadbalancer.VipAddress
+ // IPv6 Load Balancers have no support for Floating IPs.
+ if netutils.IsIPv6String(addr) {
+ msg := "Floating IP not supported for IPv6 Service %s. Using IPv6 address %s instead."
+ lbaas.eventRecorder.Eventf(service, corev1.EventTypeWarning, eventLBFloatingIPSkipped, msg, serviceName, addr)
+ klog.Infof(msg, serviceName, addr)
+ } else {
+ addr, err = lbaas.ensureFloatingIP(clusterName, service, loadbalancer, svcConf, isLBOwner)
+ if err != nil {
+ return nil, err
+ }
 }
-
 // Add annotation to Service and add LB name to load balancer tags.
 annotationUpdate := map[string]string{
 ServiceAnnotationLoadBalancerID: loadbalancer.ID,
From ea198dd003627eae96a4c6d53d595e594d960260 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Thu, 21 Mar 2024 13:30:22 +0000 Subject: [PATCH 13/23] Convert outstanding commands to cobra (#2384)
* magnum-auto-healer: Stop registering klog options
Per [1], this is no longer desirable. We are already registering the minimal options that the KEP suggests, so we can simply stop registering the others.
[1] https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components
Signed-off-by: Stephen Finucane
* client-keystone-auth: Stop registering klog options
As with magnum-auto-healer, we don't need/want to do this anymore. Don't.
Signed-off-by: Stephen Finucane
* cinder-csi-plugin: Use binary name in help page
Signed-off-by: Stephen Finucane
* client-keystone-auth: Migrate to cobra
This one is relatively trivial since the 'Run' function in 'k8s.io/component-base/cli' does most of the heavy lifting for us now, including registering logging arguments.
Signed-off-by: Stephen Finucane
* k8s-keystone-auth: Migrate to cobra
This one is slightly trickier due to how we're doing configuration, but there's still nothing crazy confusing here.
Signed-off-by: Stephen Finucane
* occm: Remove unnecessary flag handling code
k8s.io/cloud-provider switched to cobra some time back [1] and cobra uses 'pflag' rather than 'flag' under the hood. As such, there's no reason to keep the handling code for 'flag' options around. Remove it.
[1] https://github.com/kubernetes/cloud-provider/blob/v0.28.0/app/controllermanager.go#L87-L124
Signed-off-by: Stephen Finucane
* occm: Register additional options correctly
The 'NewCloudControllerManagerCommand' function, which generates the cobra Command that forms the basis of a cloud provider binary, accepts an 'additionalFlags' argument that allows us to (surprise) pass in additional provider-specific arguments. We were not making use of this, which means our options were not showing in the usage string shown on e.g. '--help'. Correct this on our end, while we wait for the fix in k8s.io/cloud-provider [1] to close the loop fully.
In addition, move our 'InitLogs' call higher up the function, before our first logging call, so that everything is initialised correctly.
[1] https://github.com/kubernetes/kubernetes/issues/120522
Signed-off-by: Stephen Finucane
* magnum-auto-healer: Remove use of 'init' methods
Instead, register the health checks and cloud provider plugins on controller start up. This avoids side-effects from merely importing the modules - which are polluting the output of '--help' - and is generally "less weird". To do this, we must make the registration methods part of the public API and remove the 'pkg/autohealing/cloudprovider/register' package in favour of a public registration method in the 'pkg/autohealing/cloudprovider/openstack' package.
Signed-off-by: Stephen Finucane --------- Signed-off-by: Stephen Finucane --- cmd/cinder-csi-plugin/main.go | 2 +- cmd/client-keystone-auth/main.go | 87 +++++++++---------- cmd/k8s-keystone-auth/main.go | 52 ++++++----- .../main.go | 20 ++--- .../cloudprovider/openstack/provider.go | 55 ++++++++++++ .../cloudprovider/register/register.go | 87 ------------------- pkg/autohealing/cmd/root.go | 6 -- pkg/autohealing/controller/controller.go | 11 ++- pkg/autohealing/healthcheck/healthcheck.go | 2 +- .../healthcheck/plugin_endpoint.go | 6 +- .../healthcheck/plugin_nodecondition.go | 6 +- 11 files changed, 138 insertions(+), 196 deletions(-) delete mode 100644 pkg/autohealing/cloudprovider/register/register.go diff --git a/cmd/cinder-csi-plugin/main.go b/cmd/cinder-csi-plugin/main.go index 1673bda4e1..6ce8556689 100644 --- a/cmd/cinder-csi-plugin/main.go +++ b/cmd/cinder-csi-plugin/main.go @@ -42,7 +42,7 @@ var ( func main() { cmd := &cobra.Command{ - Use: "Cinder", + Use: "cinder-csi-plugin", Short: "CSI based Cinder driver", Run: func(cmd *cobra.Command, args []string) { handle() diff --git a/cmd/client-keystone-auth/main.go b/cmd/client-keystone-auth/main.go index 0612507bf8..86e10ba6c2 100644 --- a/cmd/client-keystone-auth/main.go +++ b/cmd/client-keystone-auth/main.go @@ -17,7 +17,6 @@ limitations under the License. package main import ( - "flag" "fmt" "io" "os" @@ -25,15 +24,13 @@ import ( "github.com/gophercloud/gophercloud" "github.com/gophercloud/utils/openstack/clientconfig" - "github.com/spf13/pflag" - "k8s.io/component-base/logs" + "github.com/spf13/cobra" + "k8s.io/component-base/cli" "golang.org/x/term" "k8s.io/cloud-provider-openstack/pkg/identity/keystone" "k8s.io/cloud-provider-openstack/pkg/version" - kflag "k8s.io/component-base/cli/flag" - "k8s.io/klog/v2" ) const errRespTemplate string = `{ @@ -137,51 +134,49 @@ func argumentsAreSet(url, user, project, password, domain, applicationCredential return false } +var ( + url string + domain string + user string + project string + password string + clientCertPath string + clientKeyPath string + clientCAPath string + options keystone.Options + err error + applicationCredentialID string + applicationCredentialName string + applicationCredentialSecret string +) + func main() { - var url string - var domain string - var user string - var project string - var password string - var clientCertPath string - var clientKeyPath string - var clientCAPath string - var options keystone.Options - var err error - var applicationCredentialID string - var applicationCredentialName string - var applicationCredentialSecret string - var showVersion bool - - pflag.StringVar(&url, "keystone-url", os.Getenv("OS_AUTH_URL"), "URL for the OpenStack Keystone API") - pflag.StringVar(&domain, "domain-name", os.Getenv("OS_DOMAIN_NAME"), "Keystone domain name") - pflag.StringVar(&user, "user-name", os.Getenv("OS_USERNAME"), "User name") - pflag.StringVar(&project, "project-name", os.Getenv("OS_PROJECT_NAME"), "Keystone project name") - pflag.StringVar(&password, "password", os.Getenv("OS_PASSWORD"), "Password") - pflag.StringVar(&clientCertPath, "cert", os.Getenv("OS_CERT"), "Client certificate bundle file") - pflag.StringVar(&clientKeyPath, "key", os.Getenv("OS_KEY"), "Client certificate key file") - pflag.StringVar(&clientCAPath, "cacert", os.Getenv("OS_CACERT"), "Certificate authority file") - pflag.StringVar(&applicationCredentialID, "application-credential-id", os.Getenv("OS_APPLICATION_CREDENTIAL_ID"), "Application Credential ID") - 
pflag.StringVar(&applicationCredentialName, "application-credential-name", os.Getenv("OS_APPLICATION_CREDENTIAL_NAME"), "Application Credential Name") - pflag.StringVar(&applicationCredentialSecret, "application-credential-secret", os.Getenv("OS_APPLICATION_CREDENTIAL_SECRET"), "Application Credential Secret") - pflag.BoolVar(&showVersion, "version", false, "Show current version and exit") - - logs.AddFlags(pflag.CommandLine) - - klogFlags := flag.NewFlagSet("klog", flag.ExitOnError) - klog.InitFlags(klogFlags) - pflag.CommandLine.AddGoFlagSet(klogFlags) - - kflag.InitFlags() - - if showVersion { - fmt.Println(version.Version) - os.Exit(0) + cmd := &cobra.Command{ + Use: "client-keystone-auth", + Short: "Keystone client credential plugin for Kubernetes", + Run: func(cmd *cobra.Command, args []string) { + handle() + }, + Version: version.Version, } - logs.InitLogs() - defer logs.FlushLogs() + cmd.PersistentFlags().StringVar(&url, "keystone-url", os.Getenv("OS_AUTH_URL"), "URL for the OpenStack Keystone API") + cmd.PersistentFlags().StringVar(&domain, "domain-name", os.Getenv("OS_DOMAIN_NAME"), "Keystone domain name") + cmd.PersistentFlags().StringVar(&user, "user-name", os.Getenv("OS_USERNAME"), "User name") + cmd.PersistentFlags().StringVar(&project, "project-name", os.Getenv("OS_PROJECT_NAME"), "Keystone project name") + cmd.PersistentFlags().StringVar(&password, "password", os.Getenv("OS_PASSWORD"), "Password") + cmd.PersistentFlags().StringVar(&clientCertPath, "cert", os.Getenv("OS_CERT"), "Client certificate bundle file") + cmd.PersistentFlags().StringVar(&clientKeyPath, "key", os.Getenv("OS_KEY"), "Client certificate key file") + cmd.PersistentFlags().StringVar(&clientCAPath, "cacert", os.Getenv("OS_CACERT"), "Certificate authority file") + cmd.PersistentFlags().StringVar(&applicationCredentialID, "application-credential-id", os.Getenv("OS_APPLICATION_CREDENTIAL_ID"), "Application Credential ID") + cmd.PersistentFlags().StringVar(&applicationCredentialName, "application-credential-name", os.Getenv("OS_APPLICATION_CREDENTIAL_NAME"), "Application Credential Name") + cmd.PersistentFlags().StringVar(&applicationCredentialSecret, "application-credential-secret", os.Getenv("OS_APPLICATION_CREDENTIAL_SECRET"), "Application Credential Secret") + + code := cli.Run(cmd) + os.Exit(code) +} +func handle() { // Generate Gophercloud Auth Options based on input data from stdin // if IsTerminal returns "true", or from env variables otherwise. if !term.IsTerminal(int(os.Stdin.Fd())) { diff --git a/cmd/k8s-keystone-auth/main.go b/cmd/k8s-keystone-auth/main.go index fd6cdefe60..e5c4c12d8a 100644 --- a/cmd/k8s-keystone-auth/main.go +++ b/cmd/k8s-keystone-auth/main.go @@ -15,47 +15,45 @@ limitations under the License. 
package main import ( - "fmt" "os" + "github.com/spf13/cobra" "github.com/spf13/pflag" + "k8s.io/component-base/cli" "k8s.io/klog/v2" "k8s.io/cloud-provider-openstack/pkg/identity/keystone" "k8s.io/cloud-provider-openstack/pkg/version" - kflag "k8s.io/component-base/cli/flag" - "k8s.io/component-base/logs" ) +var config = keystone.NewConfig() + func main() { - var showVersion bool - pflag.BoolVar(&showVersion, "version", false, "Show current version and exit") + cmd := &cobra.Command{ + Use: "k8s-keystone-auth", + Short: "Keystone authentication webhook plugin for Kubernetes", + Run: func(cmd *cobra.Command, args []string) { + if err := config.ValidateFlags(); err != nil { + klog.Errorf("%v", err) + os.Exit(1) + } + + keystoneAuth, err := keystone.NewKeystoneAuth(config) + if err != nil { + klog.Errorf("%v", err) + os.Exit(1) + } + keystoneAuth.Run() + + }, + Version: version.Version, + } - logs.AddFlags(pflag.CommandLine) keystone.AddExtraFlags(pflag.CommandLine) - logs.InitLogs() - defer logs.FlushLogs() - - config := keystone.NewConfig() config.AddFlags(pflag.CommandLine) - kflag.InitFlags() - - if showVersion { - fmt.Println(version.Version) - os.Exit(0) - } - - if err := config.ValidateFlags(); err != nil { - klog.Errorf("%v", err) - os.Exit(1) - } + code := cli.Run(cmd) + os.Exit(code) - keystoneAuth, err := keystone.NewKeystoneAuth(config) - if err != nil { - klog.Errorf("%v", err) - os.Exit(1) - } - keystoneAuth.Run() } diff --git a/cmd/openstack-cloud-controller-manager/main.go b/cmd/openstack-cloud-controller-manager/main.go index 720cf51ae2..ddd30e7179 100644 --- a/cmd/openstack-cloud-controller-manager/main.go +++ b/cmd/openstack-cloud-controller-manager/main.go @@ -20,11 +20,9 @@ limitations under the License. package main import ( - goflag "flag" "fmt" "os" - "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/util/wait" cloudprovider "k8s.io/cloud-provider" "k8s.io/cloud-provider/app" @@ -43,26 +41,18 @@ import ( ) func main() { + logs.InitLogs() + defer logs.FlushLogs() + ccmOptions, err := options.NewCloudControllerManagerOptions() if err != nil { klog.Fatalf("unable to initialize command options: %v", err) } fss := cliflag.NamedFlagSets{} - command := app.NewCloudControllerManagerCommand(ccmOptions, cloudInitializer, app.DefaultInitFuncConstructors, names.CCMControllerAliases(), fss, wait.NeverStop) - - openstack.AddExtraFlags(pflag.CommandLine) + openstack.AddExtraFlags(fss.FlagSet("OpenStack Client")) - // TODO: once we switch everything over to Cobra commands, we can go back to calling - // utilflag.InitFlags() (by removing its pflag.Parse() call). For now, we have to set the - // normalize func and add the go flag set by hand. 
- // Here is an sample - pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc) - pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) - - // utilflag.InitFlags() - logs.InitLogs() - defer logs.FlushLogs() + command := app.NewCloudControllerManagerCommand(ccmOptions, cloudInitializer, app.DefaultInitFuncConstructors, names.CCMControllerAliases(), fss, wait.NeverStop) klog.V(1).Infof("openstack-cloud-controller-manager version: %s", version.Version) diff --git a/pkg/autohealing/cloudprovider/openstack/provider.go b/pkg/autohealing/cloudprovider/openstack/provider.go index 70a27c9d0e..93fb151078 100644 --- a/pkg/autohealing/cloudprovider/openstack/provider.go +++ b/pkg/autohealing/cloudprovider/openstack/provider.go @@ -25,6 +25,7 @@ import ( "time" "github.com/gophercloud/gophercloud" + gopenstack "github.com/gophercloud/gophercloud/openstack" "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop" "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" @@ -43,8 +44,10 @@ import ( "k8s.io/client-go/util/retry" log "k8s.io/klog/v2" + "k8s.io/cloud-provider-openstack/pkg/autohealing/cloudprovider" "k8s.io/cloud-provider-openstack/pkg/autohealing/config" "k8s.io/cloud-provider-openstack/pkg/autohealing/healthcheck" + "k8s.io/cloud-provider-openstack/pkg/client" ) const ( @@ -671,3 +674,55 @@ func CheckNodeCondition(node *apiv1.Node, conditionType apiv1.NodeConditionType, } return false } + +func NewOpenStackCloudProvider(cfg config.Config, kubeClient kubernetes.Interface) (cloudprovider.CloudProvider, error) { + client, err := client.NewOpenStackClient(&cfg.OpenStack, "magnum-auto-healer") + if err != nil { + return nil, err + } + + eoOpts := gophercloud.EndpointOpts{ + Region: cfg.OpenStack.Region, + Availability: cfg.OpenStack.EndpointType, + } + + // get nova service client + var novaClient *gophercloud.ServiceClient + novaClient, err = gopenstack.NewComputeV2(client, eoOpts) + if err != nil { + return nil, fmt.Errorf("failed to find Nova service endpoint in the region %s: %v", cfg.OpenStack.Region, err) + } + + // get heat service client + var heatClient *gophercloud.ServiceClient + heatClient, err = gopenstack.NewOrchestrationV1(client, eoOpts) + if err != nil { + return nil, fmt.Errorf("failed to find Heat service endpoint in the region %s: %v", cfg.OpenStack.Region, err) + } + + // get magnum service client + var magnumClient *gophercloud.ServiceClient + magnumClient, err = gopenstack.NewContainerInfraV1(client, eoOpts) + if err != nil { + return nil, fmt.Errorf("failed to find Magnum service endpoint in the region %s: %v", cfg.OpenStack.Region, err) + } + magnumClient.Microversion = "latest" + + // get cinder service client + var cinderClient *gophercloud.ServiceClient + cinderClient, err = gopenstack.NewBlockStorageV3(client, eoOpts) + if err != nil { + return nil, fmt.Errorf("failed to find Cinder service endpoint in the region %s: %v", cfg.OpenStack.Region, err) + } + + p := CloudProvider{ + KubeClient: kubeClient, + Nova: novaClient, + Heat: heatClient, + Magnum: magnumClient, + Cinder: cinderClient, + Config: cfg, + } + + return p, nil +} diff --git a/pkg/autohealing/cloudprovider/register/register.go b/pkg/autohealing/cloudprovider/register/register.go deleted file mode 100644 index 3c2e799e93..0000000000 --- a/pkg/autohealing/cloudprovider/register/register.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// register package is introduced in order to avoid circle imports between openstack and cloudprovider packages. -package register - -import ( - "fmt" - - "github.com/gophercloud/gophercloud" - gopenstack "github.com/gophercloud/gophercloud/openstack" - "k8s.io/client-go/kubernetes" - - "k8s.io/cloud-provider-openstack/pkg/autohealing/cloudprovider" - "k8s.io/cloud-provider-openstack/pkg/autohealing/cloudprovider/openstack" - "k8s.io/cloud-provider-openstack/pkg/autohealing/config" - "k8s.io/cloud-provider-openstack/pkg/client" -) - -func registerOpenStack(cfg config.Config, kubeClient kubernetes.Interface) (cloudprovider.CloudProvider, error) { - client, err := client.NewOpenStackClient(&cfg.OpenStack, "magnum-auto-healer") - if err != nil { - return nil, err - } - - eoOpts := gophercloud.EndpointOpts{ - Region: cfg.OpenStack.Region, - Availability: cfg.OpenStack.EndpointType, - } - - // get nova service client - var novaClient *gophercloud.ServiceClient - novaClient, err = gopenstack.NewComputeV2(client, eoOpts) - if err != nil { - return nil, fmt.Errorf("failed to find Nova service endpoint in the region %s: %v", cfg.OpenStack.Region, err) - } - - // get heat service client - var heatClient *gophercloud.ServiceClient - heatClient, err = gopenstack.NewOrchestrationV1(client, eoOpts) - if err != nil { - return nil, fmt.Errorf("failed to find Heat service endpoint in the region %s: %v", cfg.OpenStack.Region, err) - } - - // get magnum service client - var magnumClient *gophercloud.ServiceClient - magnumClient, err = gopenstack.NewContainerInfraV1(client, eoOpts) - if err != nil { - return nil, fmt.Errorf("failed to find Magnum service endpoint in the region %s: %v", cfg.OpenStack.Region, err) - } - magnumClient.Microversion = "latest" - - // get cinder service client - var cinderClient *gophercloud.ServiceClient - cinderClient, err = gopenstack.NewBlockStorageV3(client, eoOpts) - if err != nil { - return nil, fmt.Errorf("failed to find Cinder service endpoint in the region %s: %v", cfg.OpenStack.Region, err) - } - - p := openstack.CloudProvider{ - KubeClient: kubeClient, - Nova: novaClient, - Heat: heatClient, - Magnum: magnumClient, - Cinder: cinderClient, - Config: cfg, - } - - return p, nil -} - -func init() { - cloudprovider.RegisterCloudProvider(openstack.ProviderName, registerOpenStack) -} diff --git a/pkg/autohealing/cmd/root.go b/pkg/autohealing/cmd/root.go index 549107bcb5..90d25a46ca 100644 --- a/pkg/autohealing/cmd/root.go +++ b/pkg/autohealing/cmd/root.go @@ -18,7 +18,6 @@ package cmd import ( "context" - goflag "flag" "fmt" "os" "os/signal" @@ -27,7 +26,6 @@ import ( "github.com/mitchellh/go-homedir" "github.com/spf13/cobra" - flag "github.com/spf13/pflag" "github.com/spf13/viper" "k8s.io/client-go/tools/leaderelection" "k8s.io/component-base/cli" @@ -96,10 +94,6 @@ func Execute() { func init() { cobra.OnInitialize(initConfig) rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is 
$HOME/.kube_autohealer_config.yaml)") - - log.InitFlags(nil) - _ = goflag.CommandLine.Parse(nil) - flag.CommandLine.AddGoFlagSet(goflag.CommandLine) } // initConfig reads in config file and ENV variables if set. diff --git a/pkg/autohealing/controller/controller.go b/pkg/autohealing/controller/controller.go index c7f79f0645..1742d5c3dd 100644 --- a/pkg/autohealing/controller/controller.go +++ b/pkg/autohealing/controller/controller.go @@ -38,9 +38,7 @@ import ( log "k8s.io/klog/v2" "k8s.io/cloud-provider-openstack/pkg/autohealing/cloudprovider" - // revive:disable:blank-imports - _ "k8s.io/cloud-provider-openstack/pkg/autohealing/cloudprovider/register" - // revive:enable:blank-imports + "k8s.io/cloud-provider-openstack/pkg/autohealing/cloudprovider/openstack" "k8s.io/cloud-provider-openstack/pkg/autohealing/config" "k8s.io/cloud-provider-openstack/pkg/autohealing/healthcheck" ) @@ -119,6 +117,13 @@ func createKubeClients(apiserverHost string, kubeConfig string) (*kubernetes.Cli // NewController creates a new autohealer controller. func NewController(conf config.Config) *Controller { + // register healthchecks + healthcheck.RegisterHealthCheck(healthcheck.EndpointType, healthcheck.NewEndpointCheck) + healthcheck.RegisterHealthCheck(healthcheck.NodeConditionType, healthcheck.NewNodeConditionCheck) + + // register clouds + cloudprovider.RegisterCloudProvider(openstack.ProviderName, openstack.NewOpenStackCloudProvider) + // initialize k8s clients kubeClient, leaderElectionClient, err := createKubeClients(conf.Kubernetes.ApiserverHost, conf.Kubernetes.KubeConfig) if err != nil { diff --git a/pkg/autohealing/healthcheck/healthcheck.go b/pkg/autohealing/healthcheck/healthcheck.go index b8ecc565fa..3b06aa209e 100644 --- a/pkg/autohealing/healthcheck/healthcheck.go +++ b/pkg/autohealing/healthcheck/healthcheck.go @@ -59,7 +59,7 @@ type NodeController interface { UpdateNodeAnnotation(node NodeInfo, annotation string, value string) error } -func registerHealthCheck(name string, register registerPlugin) { +func RegisterHealthCheck(name string, register registerPlugin) { if _, found := checkPlugins[name]; found { log.Fatalf("Health check plugin %s is already registered.", name) } diff --git a/pkg/autohealing/healthcheck/plugin_endpoint.go b/pkg/autohealing/healthcheck/plugin_endpoint.go index c7c4ed7976..80fdf639ae 100644 --- a/pkg/autohealing/healthcheck/plugin_endpoint.go +++ b/pkg/autohealing/healthcheck/plugin_endpoint.go @@ -184,7 +184,7 @@ func (check *EndpointCheck) Check(node NodeInfo, controller NodeController) bool return check.checkDuration(node, controller, true) } -func newEndpointCheck(config interface{}) (HealthCheck, error) { +func NewEndpointCheck(config interface{}) (HealthCheck, error) { check := EndpointCheck{ Protocol: "https", Port: 6443, @@ -210,7 +210,3 @@ func newEndpointCheck(config interface{}) (HealthCheck, error) { return &check, nil } - -func init() { - registerHealthCheck(EndpointType, newEndpointCheck) -} diff --git a/pkg/autohealing/healthcheck/plugin_nodecondition.go b/pkg/autohealing/healthcheck/plugin_nodecondition.go index e70ae369fa..222c215d10 100644 --- a/pkg/autohealing/healthcheck/plugin_nodecondition.go +++ b/pkg/autohealing/healthcheck/plugin_nodecondition.go @@ -88,7 +88,7 @@ func (check *NodeConditionCheck) IsWorkerSupported() bool { return true } -func newNodeConditionCheck(config interface{}) (HealthCheck, error) { +func NewNodeConditionCheck(config interface{}) (HealthCheck, error) { check := NodeConditionCheck{ UnhealthyDuration: 300 * time.Second, 
Types: []string{"Ready"},
@@ -110,7 +110,3 @@
 return &check, nil
 }
-
-func init() {
- registerHealthCheck(NodeConditionType, newNodeConditionCheck)
-}
From 3225d984143466e4cd21cbe7c01b555438569a33 Mon Sep 17 00:00:00 2001 From: Michal Dulko Date: Thu, 28 Mar 2024 13:30:27 +0100 Subject: [PATCH 14/23] Update the release procedure (#2565)
Seems like the point explaining how to promote images from staging is gone; this commit adds it back, fixes the formatting, and moves the file into the `docs` directory.
--- docs/release-procedure.md | 44 +++++++++++++++++++++++++++++++++++++++ release-procedure.md | 42 ------------------------------------- 2 files changed, 44 insertions(+), 42 deletions(-) create mode 100644 docs/release-procedure.md delete mode 100644 release-procedure.md
diff --git a/docs/release-procedure.md b/docs/release-procedure.md new file mode 100644 index 0000000000..953edbb412 --- /dev/null +++ b/docs/release-procedure.md
@@ -0,0 +1,44 @@
+# Release Procedure
+
+The Cloud Provider OpenStack Release is done in sync with
+kubernetes/kubernetes. Minor versions can be released intermittently for
+critical bug fixes.
+
+## Making a Release
+
+1. Checkout the release branch.
+
+   ```bash
+   $ git fetch upstream
+   $ git checkout -b my-release upstream/release-X.Y
+   ```
+
+2. Update the minor version with the expected version.
+
+   Make changes in the `docs/manifests/tests/examples` directories using the
+   `hack/bump_release.sh` script by running the following command:
+
+   ```bash
+   $ hack/bump-release.sh 28 29 0
+   ```
+
+   This will replace `1.28.x`/`2.28.x` with `1.29.0`/`2.29.0` strings in the
+   `docs/manifests/tests/examples` directories. Ensure that you double-check the
+   diff before committing the changes. Non-related changes must not be shipped.
+
+3. Create a new pull request (PR) and make sure all CI checks have passed.
+
+4. Once the PR is merged, make a tag and push it to the upstream repository.
+
+   ```bash
+   $ git checkout -b release-X.Y upstream/release-X.Y
+   $ git pull upstream release-X.Y --tags
+   $ git tag -m "Release for cloud-provider-openstack to support Kubernetes release x" vX.Y.Z
+   $ git push upstream vX.Y.Z
+   ```
+
+5. [Github Actions](https://github.com/kubernetes/cloud-provider-openstack/actions/workflows/release-cpo.yaml) will create new [Docker images](https://console.cloud.google.com/gcr/images/k8s-staging-provider-os) and generate a [new draft release](https://github.com/kubernetes/cloud-provider-openstack/releases) in the repository.
+
+6. Make a PR modifying [images.yaml](https://github.com/kubernetes/k8s.io/blob/main/registry.k8s.io/images/k8s-staging-provider-os/images.yaml) to promote the gcr.io images to registry.k8s.io. The point is to copy the proper image sha256 hashes from the staging repository into images.yaml.
+
+7. Once the images are promoted, create release notes using the "Generate release notes" button in the GitHub "New release" UI and publish the release.
diff --git a/release-procedure.md b/release-procedure.md deleted file mode 100644 index febf267d70..0000000000 --- a/release-procedure.md +++ /dev/null
@@ -1,42 +0,0 @@
-# Release Procedure
-
-The Cloud Provider OpenStack Release is done in sync with
-kubernetes/kubernetes. Minor versions can be released intermittently for
-critical bug fixes.
-
-## Making a Release
-
-1. Checkout the release branch.
-
-```bash
-$ git fetch upstream
-$ git checkout -b my-release upstream/release-X.Y
-```
-
-2. Update the minor version with the expected version.
-
-Make changes in the `docs/manifests/tests/examples` directories using the
-`hack/bump_release.sh` script by running the following command:
-
-```bash
-$ hack/bump-release.sh 28 29 0
-```
-
-This will replace `1.28.x`/`2.28.x` with `1.29.0`/`2.29.0` strings in the
-`docs/manifests/tests/examples` directories. Ensure that you double-check the
-diff before committing the changes. Non-related changes must not be shipped.
-
-3. Create a new pull request (PR) and make sure all CI checks have passed.
-
-4. Once the PR is merged, make a tag and push it to the upstream repository.
-
-```bash
-$ git checkout -b release-X.Y upstream/release-X.Y
-$ git pull upstream release-X.Y --tags
-$ git tag -m "Release for cloud-provider-openstack to support Kubernetes release x" vX.Y.Z
-$ git push upstream vX.Y.Z
-```
-
-5. [Github Actions](https://github.com/kubernetes/cloud-provider-openstack/actions/workflows/release-cpo.yaml) will create new [Docker images](https://console.cloud.google.com/gcr/images/k8s-staging-provider-os) and generate a [new draft release](https://github.com/kubernetes/cloud-provider-openstack/releases) in the repository.
-
-6. Create release notes using the "Generate release notes" button in the GitHub "New release" UI and publish the release.
From 290e7c72cb689141ab7e3eeae7edacb1b993c818 Mon Sep 17 00:00:00 2001 From: Michal Dulko Date: Mon, 8 Apr 2024 16:16:04 +0200 Subject: [PATCH 15/23] Allow changing cluster-name on existing deployments (#2552)
It's a common issue that clusters are deployed with the default `--cluster-name=kubernetes`, and only later is it discovered that subsequent deployments on the same cloud will have conflicts when trying to manage LBs of the same namespace and name. This commit allows changing the cluster-name on a running environment and handles all the renames of the LB resources and their tags.
--- pkg/openstack/events.go | 1 + pkg/openstack/loadbalancer.go | 42 ++++-- pkg/openstack/loadbalancer_rename.go | 149 ++++++++++++++++++++++ pkg/openstack/loadbalancer_rename_test.go | 127 ++++++++++++++++++ pkg/openstack/loadbalancer_test.go | 8 +- pkg/util/openstack/loadbalancer.go | 31 ++++- 6 files changed, 333 insertions(+), 25 deletions(-) create mode 100644 pkg/openstack/loadbalancer_rename.go create mode 100644 pkg/openstack/loadbalancer_rename_test.go
diff --git a/pkg/openstack/events.go b/pkg/openstack/events.go index 4a0a073f78..60dd5cc649 100644 --- a/pkg/openstack/events.go +++ b/pkg/openstack/events.go
@@ -22,4 +22,5 @@ const (
 eventLBSourceRangesIgnored = "LoadBalancerSourceRangesIgnored"
 eventLBAZIgnored = "LoadBalancerAvailabilityZonesIgnored"
 eventLBFloatingIPSkipped = "LoadBalancerFloatingIPSkipped"
+ eventLBRename = "LoadBalancerRename"
 )
diff --git a/pkg/openstack/loadbalancer.go b/pkg/openstack/loadbalancer.go index e5f496830f..b9708c8920 100644 --- a/pkg/openstack/loadbalancer.go +++ b/pkg/openstack/loadbalancer.go
@@ -49,7 +49,6 @@ import (
 // Note: when creating a new Loadbalancer (VM), it can take some time before it is ready for use,
 // this timeout is used for waiting until the Loadbalancer provisioning status goes to ACTIVE state.
const (
- servicePrefix = "kube_service_"
 defaultLoadBalancerSourceRangesIPv4 = "0.0.0.0/0"
 defaultLoadBalancerSourceRangesIPv6 = "::/0"
 activeStatus = "ACTIVE"
@@ -93,10 +92,14 @@ const (
 ServiceAnnotationLoadBalancerID = "loadbalancer.openstack.org/load-balancer-id"
 // Octavia resources name formats
+ servicePrefix = "kube_service_"
 lbFormat = "%s%s_%s_%s"
- listenerFormat = "listener_%d_%s"
- poolFormat = "pool_%d_%s"
- monitorFormat = "monitor_%d_%s"
+ listenerPrefix = "listener_"
+ listenerFormat = listenerPrefix + "%d_%s"
+ poolPrefix = "pool_"
+ poolFormat = poolPrefix + "%d_%s"
+ monitorPrefix = "monitor_"
+ monitorFormat = monitorPrefix + "%d_%s"
 )
 // LbaasV2 is a LoadBalancer implementation based on Octavia
@@ -1550,13 +1553,11 @@ func (lbaas *LbaasV2) checkListenerPorts(service *corev1.Service, curListenerMap
 return nil
 }
-func (lbaas *LbaasV2) updateServiceAnnotations(service *corev1.Service, annotations map[string]string) {
+func (lbaas *LbaasV2) updateServiceAnnotation(service *corev1.Service, key, value string) {
 if service.ObjectMeta.Annotations == nil {
 service.ObjectMeta.Annotations = map[string]string{}
 }
- for key, value := range annotations {
- service.ObjectMeta.Annotations[key] = value
- }
+ service.ObjectMeta.Annotations[key] = value
 }
 // createLoadBalancerStatus creates the loadbalancer status from the different possible sources
@@ -1608,6 +1609,17 @@ func (lbaas *LbaasV2) ensureOctaviaLoadBalancer(ctx context.Context, clusterName
 return nil, fmt.Errorf("failed to get load balancer %s: %v", svcConf.lbID, err)
 }
+ // Here we test for a clusterName that could have changed in the deployment.
+ if lbHasOldClusterName(loadbalancer, clusterName) {
+ msg := "Loadbalancer %s has a name of %s with an incorrect cluster-name component. Renaming it to %s."
+ klog.Infof(msg, loadbalancer.ID, loadbalancer.Name, lbName)
+ lbaas.eventRecorder.Eventf(service, corev1.EventTypeWarning, eventLBRename, msg, loadbalancer.ID, loadbalancer.Name, lbName)
+ loadbalancer, err = renameLoadBalancer(lbaas.lb, loadbalancer, lbName, clusterName)
+ if err != nil {
+ return nil, fmt.Errorf("failed to update load balancer %s with an updated name", svcConf.lbID)
+ }
+ }
+
 // If this LB name matches the default generated name, the Service 'owns' the LB, but it's also possible for this
 // LB to be shared by other Services.
 // If the names don't match, this is an LB this Service wants to attach to.
@@ -1656,6 +1668,9 @@
 isLBOwner = true
 }
+ // Make sure the LB ID will be saved at this point.
+ lbaas.updateServiceAnnotation(service, ServiceAnnotationLoadBalancerID, loadbalancer.ID)
+
 if loadbalancer.ProvisioningStatus != activeStatus {
 return nil, fmt.Errorf("load balancer %s is not ACTIVE, current provisioning status: %s", loadbalancer.ID, loadbalancer.ProvisioningStatus)
 }
@@ -1722,12 +1737,11 @@ func (lbaas *LbaasV2) ensureOctaviaLoadBalancer(ctx context.Context, clusterName
 return nil, err
 }
 }
-
- // Add annotation to Service and add LB name to load balancer tags.
- annotationUpdate := map[string]string{
- ServiceAnnotationLoadBalancerID: loadbalancer.ID,
- ServiceAnnotationLoadBalancerAddress: addr,
- }
- lbaas.updateServiceAnnotations(service, annotationUpdate)
+
+ // save the address into the annotation
+ lbaas.updateServiceAnnotation(service, ServiceAnnotationLoadBalancerAddress, addr)
+
+ // add LB name to load balancer tags.
if svcConf.supportLBTags {
 lbTags := loadbalancer.Tags
 if !cpoutil.Contains(lbTags, lbName) {
diff --git a/pkg/openstack/loadbalancer_rename.go b/pkg/openstack/loadbalancer_rename.go new file mode 100644 index 0000000000..eab867d507 --- /dev/null +++ b/pkg/openstack/loadbalancer_rename.go
@@ -0,0 +1,149 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package openstack
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners"
+	"github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers"
+	"github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors"
+	"github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools"
+
+	"k8s.io/cloud-provider-openstack/pkg/util"
+	openstackutil "k8s.io/cloud-provider-openstack/pkg/util/openstack"
+)
+
+// lbHasOldClusterName checks if the OCCM LB prefix is present and if so, validates the cluster-name
+// component value. Returns true if the cluster-name component of the loadbalancer's name doesn't match
+// clusterName.
+func lbHasOldClusterName(loadbalancer *loadbalancers.LoadBalancer, clusterName string) bool {
+	if !strings.HasPrefix(loadbalancer.Name, servicePrefix) {
+		// This one was probably not created by OCCM; let's leave it as is.
+		return false
+	}
+	existingClusterName := getClusterName("", loadbalancer.Name)
+
+	return existingClusterName != clusterName
+}
+
+// getClusterName extracts the clusterName component from an object name
+func getClusterName(resourcePrefix, objectName string) string {
+	// This is not 100% bulletproof when the string was cut because the full name would exceed 255 characters, but honestly
+	// it's highly unlikely, because it would mean the cluster name, namespace and name together would need to exceed 200
+	// characters. As a precaution the _ part is treated as optional in the regexp, assuming the longest trim
+	// that can happen will reach the namespace, but never the clusterName. This fails if there's a _ in the clusterName, but
+	// there's nothing we can do about it.
+	lbNameRegex := regexp.MustCompile(fmt.Sprintf("%s%s(.+?)_[^_]+(_[^_]+)?$", resourcePrefix, servicePrefix)) // this is static
+
+	matches := lbNameRegex.FindAllStringSubmatch(objectName, -1)
+	if matches == nil {
+		return ""
+	}
+	return matches[0][1]
+}
+
+// replaceClusterName cuts the servicePrefix if present, replaces the clusterName and puts the prefix back
+func replaceClusterName(oldClusterName, clusterName, objectName string) string {
+	// Remove the prefix first
+	var found bool
+	objectName, found = strings.CutPrefix(objectName, servicePrefix)
+	objectName = strings.Replace(objectName, oldClusterName, clusterName, 1)
+	if found {
+		// This should always happen because we check that in lbHasOldClusterName, but just for safety.
+		objectName = servicePrefix + objectName
+	}
+	// We need to cut the name or tag to 255 characters, just as regular LB creation does.
+	return util.CutString255(objectName)
+}
+
+// renameLoadBalancer renames all the children and then the LB itself to match the new lbName.
+// The purpose is to handle a change of clusterName.
+func renameLoadBalancer(client *gophercloud.ServiceClient, loadbalancer *loadbalancers.LoadBalancer, lbName, clusterName string) (*loadbalancers.LoadBalancer, error) {
+	lbListeners, err := openstackutil.GetListenersByLoadBalancerID(client, loadbalancer.ID)
+	if err != nil {
+		return nil, err
+	}
+	for _, listener := range lbListeners {
+		if !strings.HasPrefix(listener.Name, listenerPrefix) {
+			// It doesn't seem to be ours; let's not touch it.
+			continue
+		}
+
+		oldClusterName := getClusterName(fmt.Sprintf("%s[0-9]+_", listenerPrefix), listener.Name)
+
+		if oldClusterName != clusterName {
+			// First let's handle the pool, which we assume is a child of the listener. Only one pool per listener.
+			lbPool, err := openstackutil.GetPoolByListener(client, loadbalancer.ID, listener.ID)
+			if err != nil {
+				return nil, err
+			}
+			oldClusterName = getClusterName(fmt.Sprintf("%s[0-9]+_", poolPrefix), lbPool.Name)
+			if oldClusterName != clusterName {
+				if lbPool.MonitorID != "" {
+					// If a monitor exists, let's handle it first, as we treat it as a child of the pool.
+					monitor, err := openstackutil.GetHealthMonitor(client, lbPool.MonitorID)
+					if err != nil {
+						return nil, err
+					}
+					oldClusterName := getClusterName(fmt.Sprintf("%s[0-9]+_", monitorPrefix), monitor.Name)
+					if oldClusterName != clusterName {
+						monitor.Name = replaceClusterName(oldClusterName, clusterName, monitor.Name)
+						err = openstackutil.UpdateHealthMonitor(client, monitor.ID, monitors.UpdateOpts{Name: &monitor.Name}, loadbalancer.ID)
+						if err != nil {
+							return nil, err
+						}
+					}
+				}
+
+				// The monitor is handled; let's rename the pool.
+				lbPool.Name = replaceClusterName(oldClusterName, clusterName, lbPool.Name)
+				err = openstackutil.UpdatePool(client, loadbalancer.ID, lbPool.ID, pools.UpdateOpts{Name: &lbPool.Name})
+				if err != nil {
+					return nil, err
+				}
+			}
+
+			for i, tag := range listener.Tags {
+				// There might be tags for shared listeners; that's why we analyze each tag on its own.
+				oldClusterNameTag := getClusterName("", tag)
+				if oldClusterNameTag != "" && oldClusterNameTag != clusterName {
+					listener.Tags[i] = replaceClusterName(oldClusterNameTag, clusterName, tag)
+				}
+			}
+			listener.Name = replaceClusterName(oldClusterName, clusterName, listener.Name)
+			err = openstackutil.UpdateListener(client, loadbalancer.ID, listener.ID, listeners.UpdateOpts{Name: &listener.Name, Tags: &listener.Tags})
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	// At last we rename the LB itself. This is to make sure we only stop retrying to rename the LB once all of the children
+	// are handled.
+	for i, tag := range loadbalancer.Tags {
+		// There might be tags for shared LBs; that's why we analyze each tag on its own.
+		oldClusterNameTag := getClusterName("", tag)
+		if oldClusterNameTag != "" && oldClusterNameTag != clusterName {
+			loadbalancer.Tags[i] = replaceClusterName(oldClusterNameTag, clusterName, tag)
+		}
+	}
+	return openstackutil.UpdateLoadBalancer(client, loadbalancer.ID, loadbalancers.UpdateOpts{Name: &lbName, Tags: &loadbalancer.Tags})
+}
diff --git a/pkg/openstack/loadbalancer_rename_test.go b/pkg/openstack/loadbalancer_rename_test.go new file mode 100644 index 0000000000..3e9f85cb13 --- /dev/null +++ b/pkg/openstack/loadbalancer_rename_test.go
@@ -0,0 +1,127 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openstack + +import ( + "k8s.io/cloud-provider-openstack/pkg/util" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestReplaceClusterName(t *testing.T) { + tests := []struct { + name string + oldClusterName string + clusterName string + objectName string + expected string + }{ + { + name: "Simple kubernetes replace", + oldClusterName: "kubernetes", + clusterName: "cluster123", + objectName: "kube_service_kubernetes_namespace_name", + expected: "kube_service_cluster123_namespace_name", + }, + { + name: "Simple kube replace", + oldClusterName: "kube", + clusterName: "cluster123", + objectName: "kube_service_kube_namespace_name", + expected: "kube_service_cluster123_namespace_name", + }, + { + name: "Replace, no prefix", + oldClusterName: "kubernetes", + clusterName: "cluster123", + objectName: "foobar_kubernetes_namespace_name", + expected: "foobar_cluster123_namespace_name", + }, + { + name: "Replace, not found", + oldClusterName: "foobar", + clusterName: "cluster123", + objectName: "kube_service_kubernetes_namespace_name", + expected: "kube_service_kubernetes_namespace_name", + }, + { + name: "Replace, cut 255", + oldClusterName: "kubernetes", + clusterName: "cluster123", + objectName: "kube_service_kubernetes_namespace_name" + strings.Repeat("foo", 100), + expected: "kube_service_cluster123_namespace_namefoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoo" + + "foofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoo" + + "foofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoofoof", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result := replaceClusterName(test.oldClusterName, test.clusterName, test.objectName) + assert.Equal(t, test.expected, result) + }) + } +} + +func TestDecomposeLBName(t *testing.T) { + tests := []struct { + name string + resourcePrefix string + objectName string + expected string + }{ + { + name: "Simple kubernetes", + resourcePrefix: "", + objectName: "kube_service_kubernetes_namespace_name", + expected: "kubernetes", + }, + { + name: "Kubernetes with prefix", + resourcePrefix: "listener_", + objectName: "listener_kube_service_kubernetes_namespace_name", + expected: "kubernetes", + }, + { + name: "Example with _ in clusterName", + resourcePrefix: "listener_", + objectName: "listener_kube_service_kubernetes_123_namespace_name", + expected: "kubernetes_123", + }, + { + name: "No match", + resourcePrefix: "listener_", + objectName: "FOOBAR", + expected: "", + }, + { + name: "Looong namespace, so string is cut, but no _ in clusterName", + resourcePrefix: "listener_", + objectName: util.CutString255("listener_kube_service_kubernetes_namespace" + strings.Repeat("foo", 100) + "_name"), + expected: "kubernetes", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result := getClusterName(test.resourcePrefix, test.objectName) + assert.Equal(t, test.expected, result) + }) + } +} diff --git 
a/pkg/openstack/loadbalancer_test.go b/pkg/openstack/loadbalancer_test.go index 115ea2f7ea..7539bb34e6 100644 --- a/pkg/openstack/loadbalancer_test.go +++ b/pkg/openstack/loadbalancer_test.go
@@ -1231,13 +1231,8 @@ func TestLbaasV2_updateServiceAnnotations(t *testing.T) {
 },
 }
- annotations := map[string]string{
- "key1": "value1",
- "key2": "value2",
- }
-
 lbaas := LbaasV2{}
- lbaas.updateServiceAnnotations(service, annotations)
+ lbaas.updateServiceAnnotation(service, "key1", "value1")
 serviceAnnotations := make([]map[string]string, 0)
 for key, value := range service.ObjectMeta.Annotations {
@@ -1246,7 +1241,6 @@
 expectedAnnotations := []map[string]string{
 {"key1": "value1"},
- {"key2": "value2"},
 }
 assert.ElementsMatch(t, expectedAnnotations, serviceAnnotations)
diff --git a/pkg/util/openstack/loadbalancer.go b/pkg/util/openstack/loadbalancer.go index 9cdebd173e..2e9d9ef629 100644 --- a/pkg/util/openstack/loadbalancer.go +++ b/pkg/util/openstack/loadbalancer.go
@@ -255,17 +255,25 @@ func UpdateLoadBalancerTags(client *gophercloud.ServiceClient, lbID string, tags
 Tags: &tags,
 }
+ _, err := UpdateLoadBalancer(client, lbID, updateOpts)
+
+ return err
+}
+
+// UpdateLoadBalancer updates the load balancer
+func UpdateLoadBalancer(client *gophercloud.ServiceClient, lbID string, updateOpts loadbalancers.UpdateOpts) (*loadbalancers.LoadBalancer, error) {
 mc := metrics.NewMetricContext("loadbalancer", "update")
 _, err := loadbalancers.Update(client, lbID, updateOpts).Extract()
 if mc.ObserveRequest(err) != nil {
- return err
+ return nil, err
 }
- if _, err := WaitActiveAndGetLoadBalancer(client, lbID); err != nil {
- return fmt.Errorf("failed to wait for load balancer %s ACTIVE after updating: %v", lbID, err)
+ lb, err := WaitActiveAndGetLoadBalancer(client, lbID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to wait for load balancer %s ACTIVE after updating: %v", lbID, err)
 }
- return nil
+ return lb, nil
 }
 func waitLoadbalancerDeleted(client *gophercloud.ServiceClient, loadbalancerID string) error {
@@ -546,6 +554,21 @@ func GetMembersbyPool(client *gophercloud.ServiceClient, poolID string) ([]pools
 return members, nil
 }
+// UpdatePool updates a pool and waits for the LB to become ACTIVE
+func UpdatePool(client *gophercloud.ServiceClient, lbID string, poolID string, opts pools.UpdateOpts) error {
+ mc := metrics.NewMetricContext("loadbalancer_pool", "update")
+ _, err := pools.Update(client, poolID, opts).Extract()
+ if mc.ObserveRequest(err) != nil {
+ return err
+ }
+
+ if _, err := WaitActiveAndGetLoadBalancer(client, lbID); err != nil {
+ return fmt.Errorf("failed to wait for load balancer %s ACTIVE after updating pool: %v", lbID, err)
+ }
+
+ return nil
+}
+
 // DeletePool deletes a pool.
func DeletePool(client *gophercloud.ServiceClient, poolID string, lbID string) error { mc := metrics.NewMetricContext("loadbalancer_pool", "delete") From dab0f067249eea1051e3bb57ed05d2cc3e326aa8 Mon Sep 17 00:00:00 2001 From: simon Date: Fri, 12 Apr 2024 11:24:52 +0200 Subject: [PATCH 16/23] added route section to cloud-config secret (#2570) --- charts/openstack-cloud-controller-manager/Chart.yaml | 2 +- .../templates/_helpers.tpl | 6 +++++- charts/openstack-cloud-controller-manager/values.yaml | 1 + 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/charts/openstack-cloud-controller-manager/Chart.yaml b/charts/openstack-cloud-controller-manager/Chart.yaml index d7065b9f55..db642f7095 100644 --- a/charts/openstack-cloud-controller-manager/Chart.yaml +++ b/charts/openstack-cloud-controller-manager/Chart.yaml @@ -4,7 +4,7 @@ description: Openstack Cloud Controller Manager Helm Chart icon: https://object-storage-ca-ymq-1.vexxhost.net/swift/v1/6e4619c416ff4bd19e1c087f27a43eea/www-images-prod/openstack-logo/OpenStack-Logo-Vertical.png home: https://github.com/kubernetes/cloud-provider-openstack name: openstack-cloud-controller-manager -version: 2.29.1 +version: 2.29.2 maintainers: - name: eumel8 email: f.kloeker@telekom.de diff --git a/charts/openstack-cloud-controller-manager/templates/_helpers.tpl b/charts/openstack-cloud-controller-manager/templates/_helpers.tpl index 411e157324..b669c0b73d 100644 --- a/charts/openstack-cloud-controller-manager/templates/_helpers.tpl +++ b/charts/openstack-cloud-controller-manager/templates/_helpers.tpl @@ -60,8 +60,12 @@ Create cloud-config makro. {{- range $key, $value := .Values.cloudConfig.metadata }} {{ $key }} = {{ $value | quote }} {{- end }} -{{- end }} +[Route] +{{- range $key, $value := .Values.cloudConfig.route }} +{{ $key }} = {{ $value | quote }} +{{- end }} +{{- end }} {{/* Generate string of enabled controllers. Might have a trailing comma (,) which needs to be trimmed. 
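With the `[Route]` block added to the helper above, anything set under `cloudConfig.route` in values.yaml is rendered into the `[Route]` section of the generated cloud.conf; the `router-id` option consumed by the CCM routes controller is the typical use. A hypothetical illustration (the values and router UUID below are placeholders, not part of the chart):

```yaml
# values.yaml excerpt (hypothetical)
cloudConfig:
  route:
    router-id: "5a397419-0f94-4d03-bd54-6b352ec126b5"
```

which the template renders as:

```ini
[Route]
router-id = "5a397419-0f94-4d03-bd54-6b352ec126b5"
```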
diff --git a/charts/openstack-cloud-controller-manager/values.yaml b/charts/openstack-cloud-controller-manager/values.yaml index 2e5a708e3f..f656d6bd10 100644 --- a/charts/openstack-cloud-controller-manager/values.yaml +++ b/charts/openstack-cloud-controller-manager/values.yaml @@ -108,6 +108,7 @@ cloudConfig: loadBalancer: blockStorage: metadata: + route: # Allow for specifying internal IP addresses for multiple hostnames # hostAliases: From b343c1ba6d3a4cd9aa1fa8b68a5b1fada90ca6cd Mon Sep 17 00:00:00 2001 From: Jesse Haka Date: Wed, 24 Apr 2024 14:52:54 +0300 Subject: [PATCH 17/23] [cinder-csi-plugin] define availability zone for snapshot backup (#2569) * define availability zone for snapshot backup * fix to volume backup & restore creation * add doc --- .../using-cinder-csi-plugin.md | 1 + pkg/csi/cinder/controllerserver.go | 17 ++++++++++++----- pkg/csi/cinder/openstack/openstack.go | 2 +- pkg/csi/cinder/openstack/openstack_backups.go | 17 +++++++++-------- pkg/csi/cinder/openstack/openstack_mock.go | 12 ++++++------ pkg/csi/cinder/openstack/openstack_snapshots.go | 7 ++++--- pkg/csi/cinder/openstack/openstack_volumes.go | 4 ++-- tests/sanity/cinder/fakecloud.go | 15 ++++++++------- 8 files changed, 43 insertions(+), 32 deletions(-) diff --git a/docs/cinder-csi-plugin/using-cinder-csi-plugin.md b/docs/cinder-csi-plugin/using-cinder-csi-plugin.md index 20638ea8a0..210d26a2c4 100644 --- a/docs/cinder-csi-plugin/using-cinder-csi-plugin.md +++ b/docs/cinder-csi-plugin/using-cinder-csi-plugin.md @@ -269,6 +269,7 @@ helm install --namespace kube-system --name cinder-csi ./charts/cinder-csi-plugi | VolumeSnapshotClass `parameters` | `force-create` | `false` | Enable to support creating snapshot for a volume in in-use status | | VolumeSnapshotClass `parameters` | `type` | Empty String | `snapshot` creates a VolumeSnapshot object linked to a Cinder volume snapshot. `backup` creates a VolumeSnapshot object linked to a cinder volume backup. Defaults to `snapshot` if not defined | | VolumeSnapshotClass `parameters` | `backup-max-duration-seconds-per-gb` | `20` | Defines the amount of time to wait for a backup to complete in seconds per GB of volume size | +| VolumeSnapshotClass `parameters` | `availability` | Same as volume | String. Backup Availability Zone | | Inline Volume `volumeAttributes` | `capacity` | `1Gi` | volume size for creating inline volumes| | Inline Volume `VolumeAttributes` | `type` | Empty String | Name/ID of Volume type. Corresponding volume type should exist in cinder | diff --git a/pkg/csi/cinder/controllerserver.go b/pkg/csi/cinder/controllerserver.go index 4768705ab8..fcf7aacbd7 100644 --- a/pkg/csi/cinder/controllerserver.go +++ b/pkg/csi/cinder/controllerserver.go @@ -136,7 +136,8 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol // In case a snapshot is not found // check if a Backup with the same ID exists if backupsAreEnabled && cpoerrors.IsNotFound(err) { - back, err := cloud.GetBackupByID(snapshotID) + var back *backups.Backup + back, err = cloud.GetBackupByID(snapshotID) if err != nil { //If there is an error getting the backup as well, fail. 
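// Flow recap: GetSnapshotByID ran first; GetBackupByID is only attempted
// once that lookup has returned NotFound and backups are enabled, so an ID
// matching neither resource surfaces as codes.NotFound below.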
return nil, status.Errorf(codes.NotFound, "VolumeContentSource Snapshot or Backup with ID %s not found", snapshotID) @@ -154,7 +155,6 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol if cpoerrors.IsNotFound(err) && snapshotID == "" { return nil, err } - } if content != nil && content.GetVolume() != nil { @@ -420,10 +420,17 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS } if len(backups) == 1 { - backup = &backups[0] + // since backup.VolumeID is not part of ListBackups response + // we need fetch single backup to get the full object. + backup, err = cs.Cloud.GetBackupByID(backups[0].ID) + if err != nil { + klog.Errorf("Failed to get backup by ID %s: %v", backup.ID, err) + return nil, status.Error(codes.Internal, "Failed to get backup by ID") + } // Verify the existing backup has the same VolumeID, otherwise it belongs to another volume if backup.VolumeID != volumeID { + klog.Errorf("found existing backup for volumeID (%s) but different source volume ID (%s)", volumeID, backup.VolumeID) return nil, status.Error(codes.AlreadyExists, "Backup with given name already exists, with different source volume ID") } @@ -503,7 +510,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS return nil, status.Error(codes.Internal, fmt.Sprintf("GetBackupByID failed with error %v", err)) } - err = cs.Cloud.DeleteSnapshot(snap.ID) + err = cs.Cloud.DeleteSnapshot(backup.SnapshotID) if err != nil && !cpoerrors.IsNotFound(err) { klog.Errorf("Failed to DeleteSnapshot: %v", err) return nil, status.Error(codes.Internal, fmt.Sprintf("DeleteSnapshot failed with error %v", err)) @@ -593,7 +600,7 @@ func (cs *controllerServer) createBackup(name string, volumeID string, snap *sna } } - backup, err := cs.Cloud.CreateBackup(name, volumeID, snap.ID, properties) + backup, err := cs.Cloud.CreateBackup(name, volumeID, snap.ID, parameters[openstack.SnapshotAvailabilityZone], properties) if err != nil { klog.Errorf("Failed to Create backup: %v", err) return nil, status.Error(codes.Internal, fmt.Sprintf("CreateBackup failed with error %v", err)) diff --git a/pkg/csi/cinder/openstack/openstack.go b/pkg/csi/cinder/openstack/openstack.go index 4782e43545..ee603bae74 100644 --- a/pkg/csi/cinder/openstack/openstack.go +++ b/pkg/csi/cinder/openstack/openstack.go @@ -61,7 +61,7 @@ type IOpenStack interface { DeleteSnapshot(snapID string) error GetSnapshotByID(snapshotID string) (*snapshots.Snapshot, error) WaitSnapshotReady(snapshotID string) (string, error) - CreateBackup(name, volID string, snapshotID string, tags map[string]string) (*backups.Backup, error) + CreateBackup(name, volID, snapshotID, availabilityZone string, tags map[string]string) (*backups.Backup, error) ListBackups(filters map[string]string) ([]backups.Backup, error) DeleteBackup(backupID string) error GetBackupByID(backupID string) (*backups.Backup, error) diff --git a/pkg/csi/cinder/openstack/openstack_backups.go b/pkg/csi/cinder/openstack/openstack_backups.go index 8ddc05cff0..ddc98cc624 100644 --- a/pkg/csi/cinder/openstack/openstack_backups.go +++ b/pkg/csi/cinder/openstack/openstack_backups.go @@ -44,7 +44,7 @@ const ( // CreateBackup issues a request to create a Backup from the specified Snapshot with the corresponding ID and // returns the resultant gophercloud Backup Item upon success. 
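// A hypothetical call with the extended signature (the "nova" zone and the
// tag map below are illustrative values only, not defaults of this driver):
//
//	backup, err := os.CreateBackup("pvc-backup", volID, snapID, "nova",
//		map[string]string{"created-by": "cinder-csi"})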
-func (os *OpenStack) CreateBackup(name, volID string, snapshotID string, tags map[string]string) (*backups.Backup, error) { +func (os *OpenStack) CreateBackup(name, volID, snapshotID, availabilityZone string, tags map[string]string) (*backups.Backup, error) { blockstorageServiceClient, err := openstack.NewBlockStorageV3(os.blockstorage.ProviderClient, os.epOpts) if err != nil { return &backups.Backup{}, err @@ -63,16 +63,17 @@ func (os *OpenStack) CreateBackup(name, volID string, snapshotID string, tags ma } opts := &backups.CreateOpts{ - VolumeID: volID, - SnapshotID: snapshotID, - Name: name, - Force: force, - Description: backupDescription, + VolumeID: volID, + SnapshotID: snapshotID, + Name: name, + Force: force, + Description: backupDescription, + AvailabilityZone: availabilityZone, } if tags != nil { - // Set openstack microversion to 3.43 to send metadata along with the backup - blockstorageServiceClient.Microversion = "3.43" + // Set openstack microversion to 3.51 to send metadata along with the backup + blockstorageServiceClient.Microversion = "3.51" opts.Metadata = tags } diff --git a/pkg/csi/cinder/openstack/openstack_mock.go b/pkg/csi/cinder/openstack/openstack_mock.go index 481e0157fc..14fa4bba09 100644 --- a/pkg/csi/cinder/openstack/openstack_mock.go +++ b/pkg/csi/cinder/openstack/openstack_mock.go @@ -315,12 +315,12 @@ func (_m *OpenStackMock) ListBackups(filters map[string]string) ([]backups.Backu return r0, r1 } -func (_m *OpenStackMock) CreateBackup(name, volID string, snapshotID string, tags map[string]string) (*backups.Backup, error) { - ret := _m.Called(name, volID, snapshotID, tags) +func (_m *OpenStackMock) CreateBackup(name, volID, snapshotID, availabilityZone string, tags map[string]string) (*backups.Backup, error) { + ret := _m.Called(name, volID, snapshotID, availabilityZone, tags) var r0 *backups.Backup - if rf, ok := ret.Get(0).(func(string, string, string, map[string]string) *backups.Backup); ok { - r0 = rf(name, volID, snapshotID, tags) + if rf, ok := ret.Get(0).(func(string, string, string, string, map[string]string) *backups.Backup); ok { + r0 = rf(name, volID, snapshotID, availabilityZone, tags) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*backups.Backup) @@ -328,8 +328,8 @@ func (_m *OpenStackMock) CreateBackup(name, volID string, snapshotID string, tag } var r1 error - if rf, ok := ret.Get(1).(func(string, string, string, map[string]string) error); ok { - r1 = rf(name, volID, snapshotID, tags) + if rf, ok := ret.Get(1).(func(string, string, string, string, map[string]string) error); ok { + r1 = rf(name, volID, snapshotID, availabilityZone, tags) } else { r1 = ret.Error(1) } diff --git a/pkg/csi/cinder/openstack/openstack_snapshots.go b/pkg/csi/cinder/openstack/openstack_snapshots.go index bd2f94365f..c3405de1eb 100644 --- a/pkg/csi/cinder/openstack/openstack_snapshots.go +++ b/pkg/csi/cinder/openstack/openstack_snapshots.go @@ -37,9 +37,10 @@ const ( snapReadyFactor = 1.2 snapReadySteps = 10 - snapshotDescription = "Created by OpenStack Cinder CSI driver" - SnapshotForceCreate = "force-create" - SnapshotType = "type" + snapshotDescription = "Created by OpenStack Cinder CSI driver" + SnapshotForceCreate = "force-create" + SnapshotType = "type" + SnapshotAvailabilityZone = "availability" ) // CreateSnapshot issues a request to take a Snapshot of the specified Volume with the corresponding ID and diff --git a/pkg/csi/cinder/openstack/openstack_volumes.go b/pkg/csi/cinder/openstack/openstack_volumes.go index 96550975d3..3da3de550d 100644 --- 
a/pkg/csi/cinder/openstack/openstack_volumes.go
+++ b/pkg/csi/cinder/openstack/openstack_volumes.go
@@ -72,10 +72,10 @@ func (os *OpenStack) CreateVolume(name string, size int, vtype, availability str
 		return nil, err
 	}
 
-	// creating volumes from backups is available since 3.47 microversion
+	// creating volumes from backups and cross-AZ backups is available since the 3.51 microversion
 	// https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id47
 	if !os.bsOpts.IgnoreVolumeMicroversion && sourceBackupID != "" {
-		blockstorageClient.Microversion = "3.47"
+		blockstorageClient.Microversion = "3.51"
 	}
 
 	mc := metrics.NewMetricContext("volume", "create")
diff --git a/tests/sanity/cinder/fakecloud.go b/tests/sanity/cinder/fakecloud.go
index f04f21fc67..cff3a1bc60 100644
--- a/tests/sanity/cinder/fakecloud.go
+++ b/tests/sanity/cinder/fakecloud.go
@@ -227,15 +227,16 @@ func (cloud *cloud) WaitSnapshotReady(snapshotID string) (string, error) {
 	return "available", nil
 }
 
-func (cloud *cloud) CreateBackup(name, volID string, snapshotID string, tags map[string]string) (*backups.Backup, error) {
+func (cloud *cloud) CreateBackup(name, volID, snapshotID, availabilityZone string, tags map[string]string) (*backups.Backup, error) {
 	backup := &backups.Backup{
-		ID:         randString(10),
-		Name:       name,
-		Status:     "available",
-		VolumeID:   volID,
-		SnapshotID: snapshotID,
-		CreatedAt:  time.Now(),
+		ID:               randString(10),
+		Name:             name,
+		Status:           "available",
+		VolumeID:         volID,
+		SnapshotID:       snapshotID,
+		AvailabilityZone: &availabilityZone,
+		CreatedAt:        time.Now(),
 	}
 
 	cloud.backups[backup.ID] = backup

From 59963c8870c508f9816c66e18ca9d55643499b62 Mon Sep 17 00:00:00 2001
From: Michal Dulko
Date: Thu, 25 Apr 2024 16:16:17 +0200
Subject: [PATCH 18/23] Set `--use-service-account-credentials=false` (#2572)

The above option seems to cause CCM to create clients using a
ServiceAccount from the `kube-system` namespace, which requires users
either to run in the `kube-system` namespace or to manage two
ServiceAccounts, one in `kube-system` and another in the regular CCM
namespace. See [1]. This commit changes this setting.

[1] https://github.com/kubernetes/cloud-provider/blob/c3862938334ba18226098015193374fda40ab7a9/options/options.go#L230-L237
---
 .../openstack-cloud-controller-manager/templates/daemonset.yaml | 2 +-
 .../openstack-cloud-controller-manager-ds.yaml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/charts/openstack-cloud-controller-manager/templates/daemonset.yaml b/charts/openstack-cloud-controller-manager/templates/daemonset.yaml
index 1d1b74d66c..df14bd1665 100644
--- a/charts/openstack-cloud-controller-manager/templates/daemonset.yaml
+++ b/charts/openstack-cloud-controller-manager/templates/daemonset.yaml
@@ -54,7 +54,7 @@ spec:
             - --cloud-config=$(CLOUD_CONFIG)
             - --cluster-name=$(CLUSTER_NAME)
             - --cloud-provider=openstack
-            - --use-service-account-credentials=true
+            - --use-service-account-credentials=false
             - --controllers={{- trimAll "," (include "occm.enabledControllers" .) -}}
            {{- if .Values.serviceMonitor.enabled }}
            - --bind-address=0.0.0.0
diff --git a/manifests/controller-manager/openstack-cloud-controller-manager-ds.yaml b/manifests/controller-manager/openstack-cloud-controller-manager-ds.yaml
index f5ddc2e0b1..8ac4f52b8c 100644
--- a/manifests/controller-manager/openstack-cloud-controller-manager-ds.yaml
+++ b/manifests/controller-manager/openstack-cloud-controller-manager-ds.yaml
@@ -45,7 +45,7 @@ spec:
             - --cluster-name=$(CLUSTER_NAME)
             - --cloud-config=$(CLOUD_CONFIG)
             - --cloud-provider=openstack
-            - --use-service-account-credentials=true
+            - --use-service-account-credentials=false
             - --bind-address=127.0.0.1
           volumeMounts:
             - mountPath: /etc/kubernetes/pki

From 6387cbb63fbc605673e0a812228fa43d6346d025 Mon Sep 17 00:00:00 2001
From: Michal Dulko
Date: Thu, 25 Apr 2024 17:26:29 +0200
Subject: [PATCH 19/23] Update Cinder and Manila CSI charts maintainers (#2583)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The GitHub account brtknr does not exist anymore and our chart linter
complains about this. This commit adds Matt and Michał as the
maintainers.

Co-authored-by: Michał Dulko
---
 charts/cinder-csi-plugin/Chart.yaml | 8 +++++---
 charts/manila-csi-plugin/Chart.yaml | 8 +++++---
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/charts/cinder-csi-plugin/Chart.yaml b/charts/cinder-csi-plugin/Chart.yaml
index 4037be2035..6babc70587 100644
--- a/charts/cinder-csi-plugin/Chart.yaml
+++ b/charts/cinder-csi-plugin/Chart.yaml
@@ -2,9 +2,11 @@ apiVersion: v1
 appVersion: v1.29.0
 description: Cinder CSI Chart for OpenStack
 name: openstack-cinder-csi
-version: 2.29.0
+version: 2.29.1
 home: https://github.com/kubernetes/cloud-provider-openstack
 icon: https://github.com/kubernetes/kubernetes/blob/master/logo/logo.png
 maintainers:
-  - name: brtknr
-    email: brtknr@bath.edu
+  - name: mnasiadka
+    email: mnasiadka@gmail.com
+  - name: mkjpryor
+    email: matt@stackhpc.com
diff --git a/charts/manila-csi-plugin/Chart.yaml b/charts/manila-csi-plugin/Chart.yaml
index 4345a43796..7d22d0574e 100644
--- a/charts/manila-csi-plugin/Chart.yaml
+++ b/charts/manila-csi-plugin/Chart.yaml
@@ -2,9 +2,11 @@ apiVersion: v1
 appVersion: v1.29.0
 description: Manila CSI Chart for OpenStack
 name: openstack-manila-csi
-version: 2.29.0
+version: 2.29.1
 home: http://github.com/kubernetes/cloud-provider-openstack
 icon: https://github.com/kubernetes/kubernetes/blob/master/logo/logo.png
 maintainers:
-  - name: brtknr
-    email: brtknr@bath.edu
+  - name: mnasiadka
+    email: mnasiadka@gmail.com
+  - name: mkjpryor
+    email: matt@stackhpc.com

From b6d73d68171cd1fbd326b94b84b65ee43e7b56f1 Mon Sep 17 00:00:00 2001
From: Michal Dulko
Date: Fri, 26 Apr 2024 11:43:53 +0200
Subject: [PATCH 20/23] Bump K8s to v1.30.0 (#2581)

I also needed to flip the order of mocks in the Cinder CSI nodeserver
tests, though honestly I'm not exactly sure why.
---
 Dockerfile | 4 +-
 go.mod | 87 ++++++-----
 go.sum | 144 +++++++++---------
 pkg/util/mount/mount_mock.go | 8 +-
 .../roles/install-golang/defaults/main.yml | 2 +-
 5 files changed, 122 insertions(+), 123 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index fb06487950..8ca79a5b41 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -14,7 +14,7 @@
 ## BUILD ARGS ##
 ################################################################################
 # This build arg allows the specification of a custom Golang image.
-ARG GOLANG_IMAGE=golang:1.21.5
+ARG GOLANG_IMAGE=golang:1.22.2
 
 # The distroless image on which the CPI manager image is built.
# @@ -22,7 +22,7 @@ ARG GOLANG_IMAGE=golang:1.21.5 # deterministic builds. Follow what kubernetes uses to build # kube-controller-manager, for example for 1.27.x: # https://github.com/kubernetes/kubernetes/blob/release-1.27/build/common.sh#L99 -ARG DISTROLESS_IMAGE=registry.k8s.io/build-image/go-runner:v2.3.1-go1.21.5-bookworm.0 +ARG DISTROLESS_IMAGE=registry.k8s.io/build-image/go-runner:v2.3.1-go1.22.2-bookworm.0 # We use Alpine as the source for default CA certificates and some output # images diff --git a/go.mod b/go.mod index 7cbdf69798..90cfaf7da5 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module k8s.io/cloud-provider-openstack -go 1.21 +go 1.22.0 require ( github.com/container-storage-interface/spec v1.8.0 @@ -12,48 +12,48 @@ require ( github.com/kubernetes-csi/csi-test/v5 v5.0.0 github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/onsi/ginkgo/v2 v2.13.0 - github.com/onsi/gomega v1.29.0 + github.com/onsi/ginkgo/v2 v2.15.0 + github.com/onsi/gomega v1.31.0 github.com/pborman/uuid v1.2.1 github.com/sirupsen/logrus v1.9.0 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.15.0 github.com/stretchr/testify v1.8.4 - go.uber.org/goleak v1.2.1 - golang.org/x/net v0.19.0 - golang.org/x/sys v0.15.0 - golang.org/x/term v0.15.0 + go.uber.org/goleak v1.3.0 + golang.org/x/net v0.23.0 + golang.org/x/sys v0.18.0 + golang.org/x/term v0.18.0 google.golang.org/grpc v1.58.3 - google.golang.org/protobuf v1.31.0 + google.golang.org/protobuf v1.33.0 gopkg.in/gcfg.v1 v1.2.3 gopkg.in/godo.v2 v2.0.9 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.29.1 - k8s.io/apimachinery v0.29.1 - k8s.io/apiserver v0.29.1 - k8s.io/client-go v0.29.1 - k8s.io/cloud-provider v0.29.1 - k8s.io/component-base v0.29.1 - k8s.io/klog/v2 v2.110.1 - k8s.io/kms v0.29.1 - k8s.io/kubernetes v1.29.1 - k8s.io/mount-utils v0.29.1 + k8s.io/api v0.30.0 + k8s.io/apimachinery v0.30.0 + k8s.io/apiserver v0.30.0 + k8s.io/client-go v0.30.0 + k8s.io/cloud-provider v0.30.0 + k8s.io/component-base v0.30.0 + k8s.io/klog/v2 v2.120.1 + k8s.io/kms v0.30.0 + k8s.io/kubernetes v1.30.0 + k8s.io/mount-utils v0.30.0 k8s.io/utils v0.0.0-20230726121419-3b25d923346b software.sslmate.com/src/go-pkcs12 v0.2.0 ) // the below fixes the "go list -m all" execution replace ( - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.29.1 - k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.29.1 - k8s.io/endpointslice => k8s.io/endpointslice v0.29.1 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.1 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.1 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.1 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.29.1 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.29.1 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.29.1 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.30.0 + k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.30.0 + k8s.io/endpointslice => k8s.io/endpointslice v0.30.0 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.30.0 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.30.0 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.30.0 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.30.0 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.30.0 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.30.0 ) require ( @@ -74,7 +74,7 @@ require ( github.com/evanphx/json-patch v5.6.0+incompatible // 
indirect github.com/felixge/httpsnoop v1.0.3 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect @@ -82,8 +82,8 @@ require ( github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/cel-go v0.17.7 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/cel-go v0.17.8 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect @@ -135,16 +135,15 @@ require ( go.opentelemetry.io/otel/sdk v1.19.0 // indirect go.opentelemetry.io/otel/trace v1.19.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect - go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.16.0 // indirect + go.uber.org/zap v1.26.0 // indirect + golang.org/x/crypto v0.21.0 // indirect golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect golang.org/x/oauth2 v0.10.0 // indirect - golang.org/x/sync v0.5.0 // indirect + golang.org/x/sync v0.6.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.16.1 // indirect + golang.org/x/tools v0.18.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect @@ -154,15 +153,15 @@ require ( gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.29.1 // indirect - k8s.io/component-helpers v0.29.1 // indirect - k8s.io/controller-manager v0.29.1 // indirect - k8s.io/csi-translation-lib v0.29.1 // indirect - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect - k8s.io/kubectl v0.29.1 // indirect - k8s.io/kubelet v0.29.1 // indirect - k8s.io/pod-security-admission v0.29.1 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect + k8s.io/apiextensions-apiserver v0.30.0 // indirect + k8s.io/component-helpers v0.30.0 // indirect + k8s.io/controller-manager v0.30.0 // indirect + k8s.io/csi-translation-lib v0.30.0 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/kubectl v0.30.0 // indirect + k8s.io/kubelet v0.30.0 // indirect + k8s.io/pod-security-admission v0.30.0 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.3.0 // indirect diff --git a/go.sum b/go.sum index 2494c297e0..c4f6fd3f77 100644 --- a/go.sum +++ b/go.sum @@ -47,6 +47,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/MichaelTJones/walk v0.0.0-20161122175330-4748e29d5718 h1:FSsoaa1q4jAaeiAUxf9H0PgFP7eA/UL6c3PdJH+nMN4= 
github.com/MichaelTJones/walk v0.0.0-20161122175330-4748e29d5718/go.mod h1:VVwKsx9Dc8rNG55BWqogoJzGubjKnRoXdUvpGbWqeCc= +github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= +github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -56,8 +58,6 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= @@ -128,12 +128,12 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= -github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= @@ -180,14 +180,14 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 
h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ= -github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto= +github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -319,15 +319,15 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108 github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= -github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= -github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= +github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.20.0/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= -github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= -github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE= +github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= @@ -442,14 +442,12 @@ go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmY go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod 
h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -460,8 +458,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -500,6 +498,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -541,8 +541,8 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net 
v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -566,8 +566,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -621,12 +621,12 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220731174439-a90be440212d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -696,8 +696,8 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= -golang.org/x/tools v0.16.1/go.mod 
h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -808,8 +808,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -846,50 +846,50 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.29.1 h1:DAjwWX/9YT7NQD4INu49ROJuZAAAP/Ijki48GUPzxqw= -k8s.io/api v0.29.1/go.mod h1:7Kl10vBRUXhnQQI8YR/R327zXC8eJ7887/+Ybta+RoQ= -k8s.io/apiextensions-apiserver v0.29.1 h1:S9xOtyk9M3Sk1tIpQMu9wXHm5O2MX6Y1kIpPMimZBZw= -k8s.io/apiextensions-apiserver v0.29.1/go.mod h1:zZECpujY5yTW58co8V2EQR4BD6A9pktVgHhvc0uLfeU= -k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= -k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= -k8s.io/apiserver v0.29.1 h1:e2wwHUfEmMsa8+cuft8MT56+16EONIEK8A/gpBSco+g= -k8s.io/apiserver v0.29.1/go.mod h1:V0EpkTRrJymyVT3M49we8uh2RvXf7fWC5XLB0P3SwRw= -k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= -k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks= -k8s.io/cloud-provider v0.29.1 h1:bDLpOSpysWrtU2PCkvyP2sUTwRBa6MGCmxt68CRRW/8= -k8s.io/cloud-provider v0.29.1/go.mod h1:u50Drm6AbuoKpsVbAstNiFHGgbSVHuJV4TWN5imdM2w= -k8s.io/component-base v0.29.1 h1:MUimqJPCRnnHsskTTjKD+IC1EHBbRCVyi37IoFBrkYw= -k8s.io/component-base v0.29.1/go.mod h1:fP9GFjxYrLERq1GcWWZAE3bqbNcDKDytn2srWuHTtKc= -k8s.io/component-helpers v0.29.1 h1:54MMEDu6xeJmMtAKztsPwu0kJKr4+jCUzaEIn2UXRoc= -k8s.io/component-helpers v0.29.1/go.mod h1:+I7xz4kfUgxWAPJIVKrqe4ml4rb9UGpazlOmhXYo+cY= -k8s.io/controller-manager v0.29.1 h1:bTnJFF/OWooRVeJ4QLA1ApuPH+fjHSmcVMMeL7qvI2E= -k8s.io/controller-manager v0.29.1/go.mod h1:fVhGGuBiB0B2yT2+OHXZaA88owVn5zkv18A+G9E9Qlw= -k8s.io/csi-translation-lib v0.29.1 h1:b2tYZnnHyrQVHG6GYel7egmVvKeIlX/xbTNm9ynBSUg= -k8s.io/csi-translation-lib v0.29.1/go.mod 
h1:Zglui6PgFSew8ux50djwZ3PFK6eNrWktid66D7pHDDo= +k8s.io/api v0.30.0 h1:siWhRq7cNjy2iHssOB9SCGNCl2spiF1dO3dABqZ8niA= +k8s.io/api v0.30.0/go.mod h1:OPlaYhoHs8EQ1ql0R/TsUgaRPhpKNxIMrKQfWUp8QSE= +k8s.io/apiextensions-apiserver v0.30.0 h1:jcZFKMqnICJfRxTgnC4E+Hpcq8UEhT8B2lhBcQ+6uAs= +k8s.io/apiextensions-apiserver v0.30.0/go.mod h1:N9ogQFGcrbWqAY9p2mUAL5mGxsLqwgtUce127VtRX5Y= +k8s.io/apimachinery v0.30.0 h1:qxVPsyDM5XS96NIh9Oj6LavoVFYff/Pon9cZeDIkHHA= +k8s.io/apimachinery v0.30.0/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/apiserver v0.30.0 h1:QCec+U72tMQ+9tR6A0sMBB5Vh6ImCEkoKkTDRABWq6M= +k8s.io/apiserver v0.30.0/go.mod h1:smOIBq8t0MbKZi7O7SyIpjPsiKJ8qa+llcFCluKyqiY= +k8s.io/client-go v0.30.0 h1:sB1AGGlhY/o7KCyCEQ0bPWzYDL0pwOZO4vAtTSh/gJQ= +k8s.io/client-go v0.30.0/go.mod h1:g7li5O5256qe6TYdAMyX/otJqMhIiGgTapdLchhmOaY= +k8s.io/cloud-provider v0.30.0 h1:hz1MXkFjsyO167sRZVchXEi2YYMQ6kolBi79nuICjzw= +k8s.io/cloud-provider v0.30.0/go.mod h1:iyVcGvDfmZ7m5cliI9TTHj0VTjYDNpc/K71Gp6hukjU= +k8s.io/component-base v0.30.0 h1:cj6bp38g0ainlfYtaOQuRELh5KSYjhKxM+io7AUIk4o= +k8s.io/component-base v0.30.0/go.mod h1:V9x/0ePFNaKeKYA3bOvIbrNoluTSG+fSJKjLdjOoeXQ= +k8s.io/component-helpers v0.30.0 h1:xbJtNCfSM4SB/Tz5JqCKDZv4eT5LVi/AWQ1VOxhmStU= +k8s.io/component-helpers v0.30.0/go.mod h1:68HlSwXIumMKmCx8cZe1PoafQEYh581/sEpxMrkhmX4= +k8s.io/controller-manager v0.30.0 h1:jqqT8cK0Awdy0IfT0yuqYIRmwskbdzH5AEZqkuhEVMs= +k8s.io/controller-manager v0.30.0/go.mod h1:suM1r/pxUuk2ij5Bbm7W9kBLrFujXuzIboNuWK5AfRA= +k8s.io/csi-translation-lib v0.30.0 h1:pEe6jshNVE4od2AdgYlsAtiKP/MH+NcsBbUPA/dWA6U= +k8s.io/csi-translation-lib v0.30.0/go.mod h1:5TT/awOiKEX+8CcbReVYJyddT7xqlFrp3ChE9e45MyU= k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/kms v0.29.1 h1:6dMOaxllwiAZ8p3Hys65b78MDG+hONpBBpk1rQsaEtk= -k8s.io/kms v0.29.1/go.mod h1:Hqkx3zEGWThUTbcSkK508DUv4c1HOJOB5qihSoLBWgU= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= -k8s.io/kubectl v0.29.1 h1:rWnW3hi/rEUvvg7jp4iYB68qW5un/urKbv7fu3Vj0/s= -k8s.io/kubectl v0.29.1/go.mod h1:SZzvLqtuOJYSvZzPZR9weSuP0wDQ+N37CENJf0FhDF4= -k8s.io/kubelet v0.29.1 h1:cso8Dk8dymkj8q+EvW/aCbIYU2aOkH27gho48tYza/8= -k8s.io/kubelet v0.29.1/go.mod h1:hTl/naFcCVG1Ku17fMgj/krbheBwBkf3gnFhaboMx7E= -k8s.io/kubernetes v1.29.1 h1:fxJFVb8uqbYZDYHpwIsAndBQs360cQGb0xa1gYFh3fo= -k8s.io/kubernetes v1.29.1/go.mod h1:xZPKU0yO0CBbLTnbd+XGyRmmtmaVuJykDb8gNCkeeUE= -k8s.io/mount-utils v0.29.1 h1:veXlIm52Y4tm3H0pG03cOdkw0KOJxYDa0fQqhJCoqvQ= -k8s.io/mount-utils v0.29.1/go.mod h1:9IWJTMe8tG0MYMLEp60xK9GYVeCdA3g4LowmnVi+t9Y= -k8s.io/pod-security-admission v0.29.1 h1:PkIm6Di3Cd4cPmxSPeZhq7BLts5dq+xXyXbwCY67PIk= -k8s.io/pod-security-admission v0.29.1/go.mod h1:ecYSuWWsZbeM6shzommS6ZNVvQyr8sOJ9dUoGRt9gHM= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kms v0.30.0 h1:ZlnD/ei5lpvUlPw6eLfVvH7d8i9qZ6HwUQgydNVks8g= +k8s.io/kms v0.30.0/go.mod h1:GrMurD0qk3G4yNgGcsCEmepqf9KyyIrTXYR2lyUOJC4= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod 
h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kubectl v0.30.0 h1:xbPvzagbJ6RNYVMVuiHArC1grrV5vSmmIcSZuCdzRyk= +k8s.io/kubectl v0.30.0/go.mod h1:zgolRw2MQXLPwmic2l/+iHs239L49fhSeICuMhQQXTI= +k8s.io/kubelet v0.30.0 h1:/pqHVR2Rn8ExCpn211wL3pMtqRFpcBcJPl4+1INbIMk= +k8s.io/kubelet v0.30.0/go.mod h1:WukdKqbQxnj+csn3K8XOKeX7Sh60J/da25IILjvvB5s= +k8s.io/kubernetes v1.30.0 h1:u3Yw8rNlo2NDSGaDpoxoHXLPQnEu1tfqHATKOJe94HY= +k8s.io/kubernetes v1.30.0/go.mod h1:yPbIk3MhmhGigX62FLJm+CphNtjxqCvAIFQXup6RKS0= +k8s.io/mount-utils v0.30.0 h1:EceYTNYVabfpdtIAHC4KgMzoZkm1B8ovZ1J666mYZQI= +k8s.io/mount-utils v0.30.0/go.mod h1:9sCVmwGLcV1MPvbZ+rToMDnl1QcGozy+jBPd0MsQLIo= +k8s.io/pod-security-admission v0.30.0 h1:C8J/zbrA3hVR7jatN+mN/ymUWxwU6KceS5HsEEt6rTY= +k8s.io/pod-security-admission v0.30.0/go.mod h1:eyzZB+gtMwnNduqr9tVO2vjf2DdepZsUA11SzyfXhfM= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.0 h1:Tc9rS7JJoZ9sl3OpL4842oIk6lH7gWBb0JOmJ0ute7M= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.0/go.mod h1:1ewhL9l1gkPcU/IU/6rFYfikf+7Y5imWv7ARVbBOzNs= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/pkg/util/mount/mount_mock.go b/pkg/util/mount/mount_mock.go index 5c1a0e8075..81514a962a 100644 --- a/pkg/util/mount/mount_mock.go +++ b/pkg/util/mount/mount_mock.go @@ -152,13 +152,13 @@ func (_m *MountMock) Mounter() *mount.SafeFormatAndMount { err error }{ { - cmd: "findmnt", - output: []byte("devicepath"), + cmd: "blkid", + output: []byte("UUID=\"1b47881a-1563-4896-a178-eec887b759de\" \n TYPE=\"ext4\""), err: nil, }, { - cmd: "blkid", - output: []byte("UUID=\"1b47881a-1563-4896-a178-eec887b759de\" \n TYPE=\"ext4\""), + cmd: "findmnt", + output: []byte("devicepath"), err: nil, }, } diff --git a/tests/playbooks/roles/install-golang/defaults/main.yml b/tests/playbooks/roles/install-golang/defaults/main.yml index 6af34fed40..d6110ecffb 100644 --- a/tests/playbooks/roles/install-golang/defaults/main.yml +++ b/tests/playbooks/roles/install-golang/defaults/main.yml @@ -1,5 +1,5 @@ --- -go_version: '1.21.5' +go_version: '1.22.2' arch: 'amd64' go_tarball: 'go{{ go_version }}.linux-{{ arch }}.tar.gz' go_download_location: 'https://go.dev/dl/{{ go_tarball }}' From 7a4290e8c7b0bcfdbcef1ac44e497c91f13c318b Mon Sep 17 00:00:00 2001 From: Kris Budde Date: Tue, 7 May 2024 11:36:27 +0200 Subject: [PATCH 21/23] [occm] KEP-1860: Add support for LoadBalancer ipMode (#2587) * KEP-1860: Add support for LoadBalancer ipMode * cleaner: use assertEqual for test --- pkg/openstack/loadbalancer.go | 28 +++++++++++++++++---------- pkg/openstack/loadbalancer_test.go | 31 
++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 10 deletions(-) diff --git a/pkg/openstack/loadbalancer.go b/pkg/openstack/loadbalancer.go index b9708c8920..895453b211 100644 --- a/pkg/openstack/loadbalancer.go +++ b/pkg/openstack/loadbalancer.go @@ -1568,18 +1568,26 @@ func (lbaas *LbaasV2) createLoadBalancerStatus(service *corev1.Service, svcConf status.Ingress = []corev1.LoadBalancerIngress{{Hostname: hostname}} return status } - // If the load balancer is using the PROXY protocol, expose its IP address via - // the Hostname field to prevent kube-proxy from injecting an iptables bypass. - // This is a workaround until - // https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/1860-kube-proxy-IP-node-binding - // is implemented (maybe in v1.22). - if svcConf.enableProxyProtocol && lbaas.opts.EnableIngressHostname { - fakeHostname := fmt.Sprintf("%s.%s", addr, lbaas.opts.IngressHostnameSuffix) - status.Ingress = []corev1.LoadBalancerIngress{{Hostname: fakeHostname}} - return status + + ipMode := corev1.LoadBalancerIPModeVIP + if svcConf.enableProxyProtocol { + // If the load balancer is using the PROXY protocol, expose its IP address via + // the Hostname field to prevent kube-proxy from injecting an iptables bypass. + // Setting must be removed by the user to allow the use of the LoadBalancerIPModeProxy. + if lbaas.opts.EnableIngressHostname { + fakeHostname := fmt.Sprintf("%s.%s", addr, lbaas.opts.IngressHostnameSuffix) + status.Ingress = []corev1.LoadBalancerIngress{{Hostname: fakeHostname}} + return status + } + // Set the LoadBalancerIPMode to Proxy to prevent kube-proxy from injecting an iptables bypass. + // https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/1860-kube-proxy-IP-node-binding + ipMode = corev1.LoadBalancerIPModeProxy } // Default to IP - status.Ingress = []corev1.LoadBalancerIngress{{IP: addr}} + status.Ingress = []corev1.LoadBalancerIngress{{ + IP: addr, + IPMode: &ipMode, + }} return status } diff --git a/pkg/openstack/loadbalancer_test.go b/pkg/openstack/loadbalancer_test.go index 7539bb34e6..6f3dbcb8d7 100644 --- a/pkg/openstack/loadbalancer_test.go +++ b/pkg/openstack/loadbalancer_test.go @@ -709,12 +709,15 @@ func TestLbaasV2_checkListenerPorts(t *testing.T) { } } func TestLbaasV2_createLoadBalancerStatus(t *testing.T) { + ipmodeProxy := corev1.LoadBalancerIPModeProxy + ipmodeVIP := corev1.LoadBalancerIPModeVIP type fields struct { LoadBalancer LoadBalancer } type result struct { HostName string IPAddress string + IPMode *corev1.LoadBalancerIPMode } type args struct { service *corev1.Service @@ -800,6 +803,33 @@ func TestLbaasV2_createLoadBalancerStatus(t *testing.T) { }, want: result{ IPAddress: "10.10.0.6", + IPMode: &ipmodeVIP, + }, + }, + { + name: "it should return ipMode proxy if using proxyProtocol and not EnableIngressHostname", + fields: fields{ + LoadBalancer: LoadBalancer{ + opts: LoadBalancerOpts{ + EnableIngressHostname: false, + IngressHostnameSuffix: "ingress-suffix", + }, + }, + }, + args: args{ + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{"test": "key"}, + }, + }, + svcConf: &serviceConfig{ + enableProxyProtocol: true, + }, + addr: "10.10.0.6", + }, + want: result{ + IPAddress: "10.10.0.6", + IPMode: &ipmodeProxy, }, }, } @@ -812,6 +842,7 @@ func TestLbaasV2_createLoadBalancerStatus(t *testing.T) { result := lbaas.createLoadBalancerStatus(tt.args.service, tt.args.svcConf, tt.args.addr) assert.Equal(t, tt.want.HostName, 
result.Ingress[0].Hostname)
 			assert.Equal(t, tt.want.IPAddress, result.Ingress[0].IP)
+			assert.Equal(t, tt.want.IPMode, result.Ingress[0].IPMode)
 		})
 	}
 }

From 2f186d65c3f81eda8dfd075a092385f3000f06c2 Mon Sep 17 00:00:00 2001
From: Jesse Haka
Date: Tue, 7 May 2024 15:36:05 +0300
Subject: [PATCH 22/23] Bump versions for 1.30 (#2589)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Bump versions for 1.30

* use rc2 for k3s

---------

Co-authored-by: Michał Dulko
---
 charts/cinder-csi-plugin/Chart.yaml | 4 ++--
 charts/manila-csi-plugin/Chart.yaml | 4 ++--
 charts/openstack-cloud-controller-manager/Chart.yaml | 4 ++--
 .../using-keystone-webhook-authenticator-and-authorizer.md | 2 +-
 docs/magnum-auto-healer/using-magnum-auto-healer.md | 2 +-
 .../using-octavia-ingress-controller.md | 2 +-
 examples/webhook/keystone-deployment.yaml | 2 +-
 manifests/barbican-kms/ds.yaml | 2 +-
 manifests/barbican-kms/pod.yaml | 2 +-
 manifests/cinder-csi-plugin/cinder-csi-controllerplugin.yaml | 2 +-
 manifests/cinder-csi-plugin/cinder-csi-nodeplugin.yaml | 2 +-
 .../openstack-cloud-controller-manager-ds.yaml | 2 +-
 .../openstack-cloud-controller-manager-pod.yaml | 2 +-
 manifests/magnum-auto-healer/magnum-auto-healer.yaml | 2 +-
 manifests/manila-csi-plugin/csi-controllerplugin.yaml | 2 +-
 manifests/manila-csi-plugin/csi-nodeplugin.yaml | 2 +-
 tests/playbooks/roles/install-k3s/defaults/main.yaml | 2 +-
 tests/playbooks/test-csi-cinder-e2e.yaml | 2 +-
 18 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/charts/cinder-csi-plugin/Chart.yaml b/charts/cinder-csi-plugin/Chart.yaml
index 6babc70587..25c6668adf 100644
--- a/charts/cinder-csi-plugin/Chart.yaml
+++ b/charts/cinder-csi-plugin/Chart.yaml
@@ -1,8 +1,8 @@
 apiVersion: v1
-appVersion: v1.29.0
+appVersion: v1.30.0
 description: Cinder CSI Chart for OpenStack
 name: openstack-cinder-csi
-version: 2.29.1
+version: 2.30.0
 home: https://github.com/kubernetes/cloud-provider-openstack
 icon: https://github.com/kubernetes/kubernetes/blob/master/logo/logo.png
 maintainers:
diff --git a/charts/manila-csi-plugin/Chart.yaml b/charts/manila-csi-plugin/Chart.yaml
index 7d22d0574e..fd668767cf 100644
--- a/charts/manila-csi-plugin/Chart.yaml
+++ b/charts/manila-csi-plugin/Chart.yaml
@@ -1,8 +1,8 @@
 apiVersion: v1
-appVersion: v1.29.0
+appVersion: v1.30.0
 description: Manila CSI Chart for OpenStack
 name: openstack-manila-csi
-version: 2.29.1
+version: 2.30.0
 home: http://github.com/kubernetes/cloud-provider-openstack
 icon: https://github.com/kubernetes/kubernetes/blob/master/logo/logo.png
 maintainers:
diff --git a/charts/openstack-cloud-controller-manager/Chart.yaml b/charts/openstack-cloud-controller-manager/Chart.yaml
index db642f7095..26952f0fd3 100644
--- a/charts/openstack-cloud-controller-manager/Chart.yaml
+++ b/charts/openstack-cloud-controller-manager/Chart.yaml
@@ -1,10 +1,10 @@
 apiVersion: v2
-appVersion: v1.29.0
+appVersion: v1.30.0
 description: Openstack Cloud Controller Manager Helm Chart
 icon: https://object-storage-ca-ymq-1.vexxhost.net/swift/v1/6e4619c416ff4bd19e1c087f27a43eea/www-images-prod/openstack-logo/OpenStack-Logo-Vertical.png
 home: https://github.com/kubernetes/cloud-provider-openstack
 name: openstack-cloud-controller-manager
-version: 2.29.2
+version: 2.30.0
 maintainers:
   - name: eumel8
     email: f.kloeker@telekom.de
diff --git a/docs/keystone-auth/using-keystone-webhook-authenticator-and-authorizer.md b/docs/keystone-auth/using-keystone-webhook-authenticator-and-authorizer.md
index 23c4f97c23..e2354560ae 100644
---
a/docs/keystone-auth/using-keystone-webhook-authenticator-and-authorizer.md
+++ b/docs/keystone-auth/using-keystone-webhook-authenticator-and-authorizer.md
@@ -252,7 +252,7 @@ it as a service.
 There are several things we need to notice in the deployment manifest:
 
 - We are using image
-  `registry.k8s.io/provider-os/k8s-keystone-auth:v1.29.0`
+  `registry.k8s.io/provider-os/k8s-keystone-auth:v1.30.0`
 - We use `k8s-auth-policy` configmap created above.
 - The pod uses service account `keystone-auth` created above.
 - We use `keystone-auth-certs` secret created above to inject the
diff --git a/docs/magnum-auto-healer/using-magnum-auto-healer.md b/docs/magnum-auto-healer/using-magnum-auto-healer.md
index 1748438e49..da42434569 100644
--- a/docs/magnum-auto-healer/using-magnum-auto-healer.md
+++ b/docs/magnum-auto-healer/using-magnum-auto-healer.md
@@ -73,7 +73,7 @@ user_id=ceb61464a3d341ebabdf97d1d4b97099
 user_project_id=b23a5e41d1af4c20974bf58b4dff8e5a
 password=password
 region=RegionOne
-image=registry.k8s.io/provider-os/magnum-auto-healer:v1.29.0
+image=registry.k8s.io/provider-os/magnum-auto-healer:v1.30.0
 
 cat <<EOF > /etc/kubernetes/octavia-ingress-controller/deployment.yaml
 ---
diff --git a/examples/webhook/keystone-deployment.yaml b/examples/webhook/keystone-deployment.yaml
index d4a485a6ea..9064c57cd6 100644
--- a/examples/webhook/keystone-deployment.yaml
+++ b/examples/webhook/keystone-deployment.yaml
@@ -18,7 +18,7 @@ spec:
       serviceAccountName: k8s-keystone
       containers:
         - name: k8s-keystone-auth
-          image: registry.k8s.io/provider-os/k8s-keystone-auth:v1.29.0
+          image: registry.k8s.io/provider-os/k8s-keystone-auth:v1.30.0
           args:
             - ./bin/k8s-keystone-auth
             - --tls-cert-file
diff --git a/manifests/barbican-kms/ds.yaml b/manifests/barbican-kms/ds.yaml
index bf197687ec..f4de56db0c 100644
--- a/manifests/barbican-kms/ds.yaml
+++ b/manifests/barbican-kms/ds.yaml
@@ -30,7 +30,7 @@ spec:
       serviceAccountName: cloud-controller-manager
       containers:
         - name: barbican-kms
-          image: registry.k8s.io/provider-os/barbican-kms-plugin:v1.29.0
+          image: registry.k8s.io/provider-os/barbican-kms-plugin:v1.30.0
           args:
             - /bin/barbican-kms-plugin
             - --socketpath=$(KMS_ENDPOINT)
diff --git a/manifests/barbican-kms/pod.yaml b/manifests/barbican-kms/pod.yaml
index 2ab64627c1..f031a79416 100644
--- a/manifests/barbican-kms/pod.yaml
+++ b/manifests/barbican-kms/pod.yaml
@@ -5,7 +5,7 @@ metadata:
 spec:
   containers:
     - name: barbican-kms
-      image: registry.k8s.io/provider-os/barbican-kms-plugin:v1.29.0
+      image: registry.k8s.io/provider-os/barbican-kms-plugin:v1.30.0
      args:
        - "--socketpath=/kms/kms.sock"
        - "--cloud-config=/etc/kubernetes/cloud-config"
diff --git a/manifests/cinder-csi-plugin/cinder-csi-controllerplugin.yaml b/manifests/cinder-csi-plugin/cinder-csi-controllerplugin.yaml
index ffd3ee12ea..bdea87f08d 100644
--- a/manifests/cinder-csi-plugin/cinder-csi-controllerplugin.yaml
+++ b/manifests/cinder-csi-plugin/cinder-csi-controllerplugin.yaml
@@ -93,7 +93,7 @@ spec:
             - mountPath: /var/lib/csi/sockets/pluginproxy/
               name: socket-dir
         - name: cinder-csi-plugin
-          image: registry.k8s.io/provider-os/cinder-csi-plugin:v1.29.0
+          image: registry.k8s.io/provider-os/cinder-csi-plugin:v1.30.0
           args:
             - /bin/cinder-csi-plugin
             - "--endpoint=$(CSI_ENDPOINT)"
diff --git a/manifests/cinder-csi-plugin/cinder-csi-nodeplugin.yaml b/manifests/cinder-csi-plugin/cinder-csi-nodeplugin.yaml
index 705cccfea2..d87b0690cb 100644
--- a/manifests/cinder-csi-plugin/cinder-csi-nodeplugin.yaml
+++ b/manifests/cinder-csi-plugin/cinder-csi-nodeplugin.yaml
@@ -53,7 +53,7 @@ spec:
             capabilities:
               add: ["SYS_ADMIN"]
             allowPrivilegeEscalation: true
-          image: registry.k8s.io/provider-os/cinder-csi-plugin:v1.29.0
+          image: registry.k8s.io/provider-os/cinder-csi-plugin:v1.30.0
           args:
             - /bin/cinder-csi-plugin
             - "--endpoint=$(CSI_ENDPOINT)"
diff --git a/manifests/controller-manager/openstack-cloud-controller-manager-ds.yaml b/manifests/controller-manager/openstack-cloud-controller-manager-ds.yaml
index 8ac4f52b8c..004e95c6c2 100644
--- a/manifests/controller-manager/openstack-cloud-controller-manager-ds.yaml
+++ b/manifests/controller-manager/openstack-cloud-controller-manager-ds.yaml
@@ -38,7 +38,7 @@ spec:
       serviceAccountName: cloud-controller-manager
       containers:
         - name: openstack-cloud-controller-manager
-          image: registry.k8s.io/provider-os/openstack-cloud-controller-manager:v1.29.0
+          image: registry.k8s.io/provider-os/openstack-cloud-controller-manager:v1.30.0
           args:
             - /bin/openstack-cloud-controller-manager
             - --v=1
diff --git a/manifests/controller-manager/openstack-cloud-controller-manager-pod.yaml b/manifests/controller-manager/openstack-cloud-controller-manager-pod.yaml
index dfcc05dc9e..ccf59240a4 100644
--- a/manifests/controller-manager/openstack-cloud-controller-manager-pod.yaml
+++ b/manifests/controller-manager/openstack-cloud-controller-manager-pod.yaml
@@ -11,7 +11,7 @@ metadata:
 spec:
   containers:
     - name: openstack-cloud-controller-manager
-      image: registry.k8s.io/provider-os/openstack-cloud-controller-manager:v1.29.0
+      image: registry.k8s.io/provider-os/openstack-cloud-controller-manager:v1.30.0
      args:
        - /bin/openstack-cloud-controller-manager
        - --v=1
diff --git a/manifests/magnum-auto-healer/magnum-auto-healer.yaml b/manifests/magnum-auto-healer/magnum-auto-healer.yaml
index 9c4227219c..5b35333943 100644
--- a/manifests/magnum-auto-healer/magnum-auto-healer.yaml
+++ b/manifests/magnum-auto-healer/magnum-auto-healer.yaml
@@ -88,7 +88,7 @@ spec:
         node-role.kubernetes.io/control-plane: ""
       containers:
         - name: magnum-auto-healer
-          image: registry.k8s.io/provider-os/magnum-auto-healer:v1.29.0
+          image: registry.k8s.io/provider-os/magnum-auto-healer:v1.30.0
           imagePullPolicy: Always
           args:
             - /bin/magnum-auto-healer
diff --git a/manifests/manila-csi-plugin/csi-controllerplugin.yaml b/manifests/manila-csi-plugin/csi-controllerplugin.yaml
index a60e5234e0..a805d18879 100644
--- a/manifests/manila-csi-plugin/csi-controllerplugin.yaml
+++ b/manifests/manila-csi-plugin/csi-controllerplugin.yaml
@@ -77,7 +77,7 @@ spec:
             capabilities:
               add: ["SYS_ADMIN"]
             allowPrivilegeEscalation: true
-          image: registry.k8s.io/provider-os/manila-csi-plugin:v1.29.0
+          image: registry.k8s.io/provider-os/manila-csi-plugin:v1.30.0
           command: ["/bin/sh", "-c",
             '/bin/manila-csi-plugin
             --nodeid=$(NODE_ID)
diff --git a/manifests/manila-csi-plugin/csi-nodeplugin.yaml b/manifests/manila-csi-plugin/csi-nodeplugin.yaml
index afc0d66c76..2bd6584c1e 100644
--- a/manifests/manila-csi-plugin/csi-nodeplugin.yaml
+++ b/manifests/manila-csi-plugin/csi-nodeplugin.yaml
@@ -50,7 +50,7 @@ spec:
             capabilities:
               add: ["SYS_ADMIN"]
             allowPrivilegeEscalation: true
-          image: registry.k8s.io/provider-os/manila-csi-plugin:v1.29.0
+          image: registry.k8s.io/provider-os/manila-csi-plugin:v1.30.0
           command: ["/bin/sh", "-c",
             '/bin/manila-csi-plugin
             --nodeid=$(NODE_ID)
diff --git a/tests/playbooks/roles/install-k3s/defaults/main.yaml b/tests/playbooks/roles/install-k3s/defaults/main.yaml
index 32ac816b7b..aa024460c8 100644
--- a/tests/playbooks/roles/install-k3s/defaults/main.yaml
+++ b/tests/playbooks/roles/install-k3s/defaults/main.yaml
@@ -1,5 +1,5 @@
 ---
-k3s_release: v1.29.0+k3s1
+k3s_release: v1.30.0-rc2+k3s1
 worker_node_count: 1
 cluster_token: "K1039d1cf76d1f8b0e8b0d48e7c60d9c4a43c2e7a56de5d86f346f2288a2677f1d7::server:2acba4e60918c0e2d1f1d1a7c4e81e7b"
 devstack_workdir: "{{ ansible_user_dir }}/devstack"
diff --git a/tests/playbooks/test-csi-cinder-e2e.yaml b/tests/playbooks/test-csi-cinder-e2e.yaml
index ac9a7a401a..cca5406d7c 100644
--- a/tests/playbooks/test-csi-cinder-e2e.yaml
+++ b/tests/playbooks/test-csi-cinder-e2e.yaml
@@ -4,7 +4,7 @@
   gather_facts: true
   vars:
-    e2e_test_version: v1.29.0
+    e2e_test_version: v1.30.0
     user: stack
     global_env: {}
     devstack_workdir: /home/{{ user }}/devstack

From 89aa2cc9e5c340e35a6208ab0949183a78528ba4 Mon Sep 17 00:00:00 2001
From: k8s-infra-cherrypick-robot <90416843+k8s-infra-cherrypick-robot@users.noreply.github.com>
Date: Tue, 28 May 2024 03:21:22 -0700
Subject: [PATCH 23/23] [release-1.30] [occm] add node selector support for
 loadbalancer services (#2603)

* POC of TargetNodeLabels selector on OpenStack LB

* Fix type errors

* Update implementation of getKeyValuePropertiesFromServiceAnnotation

* gofmt -w -s ./pkg

* Polish the code and add documentation

---------

Co-authored-by: Ririko Nakamura
Co-authored-by: kayrus
---
 ...cations-using-loadbalancer-type-service.md |   6 ++
 ...sing-openstack-cloud-controller-manager.md |  19 +++-
 pkg/openstack/loadbalancer.go                 |  80 ++++++++++++--
 pkg/openstack/loadbalancer_test.go            | 101 ++++++++++++++++++
 pkg/openstack/openstack.go                    |   4 +-
 pkg/util/util.go                              |  26 +++++
 pkg/util/util_test.go                         |  49 +++++++++
 7 files changed, 277 insertions(+), 8 deletions(-)
 create mode 100644 pkg/util/util_test.go

diff --git a/docs/openstack-cloud-controller-manager/expose-applications-using-loadbalancer-type-service.md b/docs/openstack-cloud-controller-manager/expose-applications-using-loadbalancer-type-service.md
index 219adc4e90..5fa8577006 100644
--- a/docs/openstack-cloud-controller-manager/expose-applications-using-loadbalancer-type-service.md
+++ b/docs/openstack-cloud-controller-manager/expose-applications-using-loadbalancer-type-service.md
@@ -236,6 +236,12 @@ Request Body:
   This annotation is automatically added and it contains the floating ip address of the load balancer service.
   When using `loadbalancer.openstack.org/hostname` annotation it is the only place to see the real address of the load balancer.
 
+- `loadbalancer.openstack.org/node-selector`
+
+  A comma-separated list of key=value pairs that is matched against node labels to select the nodes targeted by the load balancer. When defined, only nodes whose labels match all the specified pairs are targeted. If a pair consists of a key alone, without a value, the filter checks only for the existence of that label key on the node. If the annotation is not set, the `node-selector` value defined in the OCCM configuration is applied.
+
+  Example: To target nodes carrying the labels `env=production` and `region=default`, set the `loadbalancer.openstack.org/node-selector` annotation to `env=production, region=default`.
+
 ### Switching between Floating Subnets by using preconfigured Classes
 
 If you have multiple `FloatingIPPools` and/or `FloatingIPSubnets` it might be desirable to offer the user logical meanings for `LoadBalancers` like `internetFacing` or `DMZ` instead of requiring the user to select a dedicated network or subnet ID at the service object level as an annotation.
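The selector semantics this annotation describes can be sketched in a few lines of Go. This is a minimal, self-contained illustration of the parsing and matching rules only; `parseSelector` and `matches` are illustrative names, not helpers from this patch:

```go
package main

import (
	"fmt"
	"strings"
)

// parseSelector turns "env=production, region=default" into
// {"env": "production", "region": "default"}; a bare key maps to "".
func parseSelector(s string) map[string]string {
	sel := map[string]string{}
	for _, kv := range strings.Split(strings.TrimSpace(s), ",") {
		parts := strings.SplitN(strings.TrimSpace(kv), "=", 2)
		if parts[0] == "" {
			continue // skip empty entries
		}
		if len(parts) == 2 {
			sel[parts[0]] = parts[1] // "key=val" (or "key=val=foo")
		} else {
			sel[parts[0]] = "" // bare "key": existence check only
		}
	}
	return sel
}

// matches reports whether the node labels satisfy every selector entry:
// an empty selector value means the label key merely has to exist.
func matches(labels, sel map[string]string) bool {
	for k, v := range sel {
		got, ok := labels[k]
		if !ok || (v != "" && got != v) {
			return false
		}
	}
	return true
}

func main() {
	labels := map[string]string{"env": "production", "region": "default"}
	fmt.Println(matches(labels, parseSelector("env=production, region=default"))) // true
	fmt.Println(matches(labels, parseSelector("env")))                            // true: key exists
	fmt.Println(matches(labels, parseSelector("env=staging")))                    // false
}
```

Note that an empty selector string produces an empty map, which matches every node; that is consistent with the documented default of targeting all nodes when no selector is given.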
diff --git a/docs/openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md b/docs/openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md
index 06bc54bb17..3379c861f8 100644
--- a/docs/openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md
+++ b/docs/openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md
@@ -207,7 +207,7 @@ Although the openstack-cloud-controller-manager was initially implemented with N
   * `ROUND_ROBIN` (default)
   * `LEAST_CONNECTIONS`
   * `SOURCE_IP`
-  
+
   If `lb-provider` is set to "ovn" the value must be set to `SOURCE_IP_PORT`.
 
 * `lb-provider`
@@ -248,6 +248,23 @@ Although the openstack-cloud-controller-manager was initially implemented with N
 * `internal-lb`
   Determines whether or not to create an internal load balancer (no floating IP) by default. Default: false.
 
+* `node-selector`
+  A comma-separated list of key=value pairs that is matched against node labels to select the nodes targeted by the load balancer. When defined, only nodes whose labels match all the specified pairs are targeted. If a pair consists of a key alone, without a value, the filter checks only for the existence of that label key on the node. When `node-selector` is not set (the default), all nodes are added as members to a load balancer pool.
+
+  Note: This configuration option can be overridden with the `loadbalancer.openstack.org/node-selector` service annotation. Refer to [Exposing applications using services of LoadBalancer type](./expose-applications-using-loadbalancer-type-service.md).
+
+  Example: To target nodes with the labels `env=production` and `region=default`, set `node-selector` as follows:
+
+  ```
+  node-selector="env=production, region=default"
+  ```
+
+  Example: To target nodes that have the key `env` with any value and the key `region` set specifically to `default`, set `node-selector` as follows:
+
+  ```
+  node-selector="env, region=default"
+  ```
+
 * `cascade-delete`
   Determines whether or not to perform cascade deletion of load balancers. Default: true.
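The interplay between this config option and the service annotation amounts to a simple precedence rule: the per-service annotation, when present, wins over the cloud-config default. A minimal sketch of that resolution, assuming illustrative names rather than the actual OCCM helpers:

```go
package main

import "fmt"

// resolveNodeSelector sketches the precedence described above: the
// per-service annotation, when present, overrides the cloud-config
// default. The function name and flow are illustrative, not OCCM's API.
func resolveNodeSelector(svcAnnotations map[string]string, configDefault string) string {
	if v, ok := svcAnnotations["loadbalancer.openstack.org/node-selector"]; ok {
		return v // the annotation wins, even if it selects different labels
	}
	return configDefault // otherwise fall back to [LoadBalancer] node-selector
}

func main() {
	ann := map[string]string{"loadbalancer.openstack.org/node-selector": "env=production"}
	fmt.Println(resolveNodeSelector(ann, "env, region=default"))                  // env=production
	fmt.Println(resolveNodeSelector(map[string]string{}, "env, region=default"))  // env, region=default
}
```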
diff --git a/pkg/openstack/loadbalancer.go b/pkg/openstack/loadbalancer.go
index 895453b211..7cf74011d6 100644
--- a/pkg/openstack/loadbalancer.go
+++ b/pkg/openstack/loadbalancer.go
@@ -56,6 +56,7 @@ const (
 	annotationXForwardedFor = "X-Forwarded-For"
 
 	ServiceAnnotationLoadBalancerInternal          = "service.beta.kubernetes.io/openstack-internal-load-balancer"
+	ServiceAnnotationLoadBalancerNodeSelector      = "loadbalancer.openstack.org/node-selector"
 	ServiceAnnotationLoadBalancerConnLimit         = "loadbalancer.openstack.org/connection-limit"
 	ServiceAnnotationLoadBalancerFloatingNetworkID = "loadbalancer.openstack.org/floating-network-id"
 	ServiceAnnotationLoadBalancerFloatingSubnet    = "loadbalancer.openstack.org/floating-subnet"
@@ -119,6 +120,7 @@ type serviceConfig struct {
 	lbMemberSubnetID     string
 	lbPublicNetworkID    string
 	lbPublicSubnetSpec   *floatingSubnetSpec
+	nodeSelectors        map[string]string
 	keepClientIP         bool
 	enableProxyProtocol  bool
 	timeoutClientData    int
@@ -405,6 +407,14 @@ func nodeAddressForLB(node *corev1.Node, preferredIPFamily corev1.IPFamily) (str
 	return "", cpoerrors.ErrNoAddressFound
 }
 
+// getKeyValueFromServiceAnnotation converts a comma-separated list of key-value
+// pairs from the specified annotation into a map, falling back to parsing
+// defaultSetting the same way when the annotation is not set
+func getKeyValueFromServiceAnnotation(service *corev1.Service, annotationKey string, defaultSetting string) map[string]string {
+	annotationValue := getStringFromServiceAnnotation(service, annotationKey, defaultSetting)
+	return cpoutil.StringToMap(annotationValue)
+}
+
 // getStringFromServiceAnnotation searches a given v1.Service for a specific annotationKey and either returns the annotation's value or a specified defaultSetting
 func getStringFromServiceAnnotation(service *corev1.Service, annotationKey string, defaultSetting string) string {
 	klog.V(4).Infof("getStringFromServiceAnnotation(%s/%s, %v, %v)", service.Namespace, service.Name, annotationKey, defaultSetting)
@@ -1229,6 +1239,16 @@ func (lbaas *LbaasV2) checkServiceUpdate(service *corev1.Service, nodes []*corev
 	svcConf.lbID = getStringFromServiceAnnotation(service, ServiceAnnotationLoadBalancerID, "")
 	svcConf.supportLBTags = openstackutil.IsOctaviaFeatureSupported(lbaas.lb, openstackutil.OctaviaFeatureTags, lbaas.opts.LBProvider)
 
+	// Get service node-selector annotations
+	svcConf.nodeSelectors = getKeyValueFromServiceAnnotation(service, ServiceAnnotationLoadBalancerNodeSelector, lbaas.opts.NodeSelector)
+	for key, value := range svcConf.nodeSelectors {
+		if value == "" {
+			klog.V(3).Infof("Target node label key %s is set for LoadBalancer service %s", key, serviceName)
+		} else {
+			klog.V(3).Infof("Target node label %s=%s is set for LoadBalancer service %s", key, value, serviceName)
+		}
+	}
+
 	// Find subnet ID for creating members
 	memberSubnetID, err := lbaas.getMemberSubnetID(service)
 	if err != nil {
@@ -1314,6 +1334,16 @@ func (lbaas *LbaasV2) checkService(service *corev1.Service, nodes []*corev1.Node
 	svcConf.lbID = getStringFromServiceAnnotation(service, ServiceAnnotationLoadBalancerID, "")
 	svcConf.supportLBTags = openstackutil.IsOctaviaFeatureSupported(lbaas.lb, openstackutil.OctaviaFeatureTags, lbaas.opts.LBProvider)
 
+	// Get service node-selector annotations
+	svcConf.nodeSelectors = getKeyValueFromServiceAnnotation(service, ServiceAnnotationLoadBalancerNodeSelector, lbaas.opts.NodeSelector)
+	for key, value := range svcConf.nodeSelectors {
+		if value == "" {
+			klog.V(3).Infof("Target node label key %s is set for LoadBalancer service %s", key, serviceName)
+		} else {
+			klog.V(3).Infof("Target node label %s=%s is set for LoadBalancer service %s", key, value, serviceName)
+		}
+	}
+
 	// If in the config file internal-lb=true, user is not allowed to create external service.
 	if lbaas.opts.InternalLB {
 		if !getBoolFromServiceAnnotation(service, ServiceAnnotationLoadBalancerInternal, false) {
@@ -1602,6 +1632,9 @@ func (lbaas *LbaasV2) ensureOctaviaLoadBalancer(ctx context.Context, clusterName
 		return nil, err
 	}
 
+	// apply node-selector to a list of nodes
+	filteredNodes := filterNodes(nodes, svcConf.nodeSelectors)
+
 	// Use more meaningful name for the load balancer but still need to check the legacy name for backward compatibility.
 	lbName := lbaas.GetLoadBalancerName(ctx, clusterName, service)
 	svcConf.lbName = lbName
@@ -1666,7 +1699,7 @@ func (lbaas *LbaasV2) ensureOctaviaLoadBalancer(ctx context.Context, clusterName
 			return nil, fmt.Errorf("error getting loadbalancer for Service %s: %v", serviceName, err)
 		}
 		klog.InfoS("Creating loadbalancer", "lbName", lbName, "service", klog.KObj(service))
-		loadbalancer, err = lbaas.createOctaviaLoadBalancer(lbName, clusterName, service, nodes, svcConf)
+		loadbalancer, err = lbaas.createOctaviaLoadBalancer(lbName, clusterName, service, filteredNodes, svcConf)
 		if err != nil {
 			return nil, fmt.Errorf("error creating loadbalancer %s: %v", lbName, err)
 		}
@@ -1712,7 +1745,7 @@ func (lbaas *LbaasV2) ensureOctaviaLoadBalancer(ctx context.Context, clusterName
 			return nil, err
 		}
 
-		pool, err := lbaas.ensureOctaviaPool(loadbalancer.ID, cpoutil.Sprintf255(poolFormat, portIndex, lbName), listener, service, port, nodes, svcConf)
+		pool, err := lbaas.ensureOctaviaPool(loadbalancer.ID, cpoutil.Sprintf255(poolFormat, portIndex, lbName), listener, service, port, filteredNodes, svcConf)
 		if err != nil {
 			return nil, err
 		}
@@ -1765,7 +1798,7 @@ func (lbaas *LbaasV2) ensureOctaviaLoadBalancer(ctx context.Context, clusterName
 	status := lbaas.createLoadBalancerStatus(service, svcConf, addr)
 
 	if lbaas.opts.ManageSecurityGroups {
-		err := lbaas.ensureAndUpdateOctaviaSecurityGroup(clusterName, service, nodes, svcConf)
+		err := lbaas.ensureAndUpdateOctaviaSecurityGroup(clusterName, service, filteredNodes, svcConf)
 		if err != nil {
 			return status, fmt.Errorf("failed when reconciling security groups for LB service %v/%v: %v", service.Namespace, service.Name, err)
 		}
@@ -1818,8 +1851,11 @@ func (lbaas *LbaasV2) updateOctaviaLoadBalancer(ctx context.Context, clusterName
 		return err
 	}
 
+	// apply node-selector to a list of nodes
+	filteredNodes := filterNodes(nodes, svcConf.nodeSelectors)
+
 	serviceName := fmt.Sprintf("%s/%s", service.Namespace, service.Name)
-	klog.V(2).Infof("Updating %d nodes for Service %s in cluster %s", len(nodes), serviceName, clusterName)
+	klog.V(2).Infof("Updating %d nodes for Service %s in cluster %s", len(filteredNodes), serviceName, clusterName)
 
 	// Get load balancer
 	var loadbalancer *loadbalancers.LoadBalancer
@@ -1866,7 +1902,7 @@ func (lbaas *LbaasV2) updateOctaviaLoadBalancer(ctx context.Context, clusterName
 			return fmt.Errorf("loadbalancer %s does not contain required listener for port %d and protocol %s", loadbalancer.ID, port.Port, port.Protocol)
 		}
 
-		pool, err := lbaas.ensureOctaviaPool(loadbalancer.ID, cpoutil.Sprintf255(poolFormat, portIndex, loadbalancer.Name), &listener, service, port, nodes, svcConf)
+		pool, err := lbaas.ensureOctaviaPool(loadbalancer.ID, cpoutil.Sprintf255(poolFormat, portIndex, loadbalancer.Name), &listener, service, port, filteredNodes, svcConf)
 		if err != nil {
 			return err
 		}
@@ -1878,7 +1914,7 @@ func (lbaas *LbaasV2) updateOctaviaLoadBalancer(ctx context.Context, clusterName
 	}
 
 	if lbaas.opts.ManageSecurityGroups {
-		err := lbaas.ensureAndUpdateOctaviaSecurityGroup(clusterName, service, nodes, svcConf)
+		err := lbaas.ensureAndUpdateOctaviaSecurityGroup(clusterName, service, filteredNodes, svcConf)
 		if err != nil {
 			return fmt.Errorf("failed to update Security Group for loadbalancer service %s: %v", serviceName, err)
 		}
@@ -2184,3 +2220,35 @@ func PreserveGopherError(rawError error) error {
 	}
 	return rawError
 }
+
+// filterNodes uses node labels to filter the nodes that should be targeted by the LB,
+// ensuring that all the labels provided in an annotation are present on the nodes
+func filterNodes(nodes []*corev1.Node, filterLabels map[string]string) []*corev1.Node {
+	if len(filterLabels) == 0 {
+		return nodes
+	}
+
+	filteredNodes := make([]*corev1.Node, 0, len(nodes))
+	for _, node := range nodes {
+		if matchNodeLabels(node, filterLabels) {
+			filteredNodes = append(filteredNodes, node)
+		}
+	}
+
+	return filteredNodes
+}
+
+// matchNodeLabels checks if a node has all the labels in filterLabels with matching values
+func matchNodeLabels(node *corev1.Node, filterLabels map[string]string) bool {
+	if node == nil || len(node.Labels) == 0 {
+		return false
+	}
+
+	for k, v := range filterLabels {
+		if nodeLabelValue, ok := node.Labels[k]; !ok || (v != "" && nodeLabelValue != v) {
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/pkg/openstack/loadbalancer_test.go b/pkg/openstack/loadbalancer_test.go
index 6f3dbcb8d7..bf3b5d63e2 100644
--- a/pkg/openstack/loadbalancer_test.go
+++ b/pkg/openstack/loadbalancer_test.go
@@ -2452,3 +2452,104 @@ func TestBuildListenerCreateOpt(t *testing.T) {
 		})
 	}
 }
+
+func TestFilterNodes(t *testing.T) {
+	tests := []struct {
+		name           string
+		nodeLabels     map[string]string
+		service        *corev1.Service
+		annotationKey  string
+		defaultSetting map[string]string
+		nodeFiltered   bool
+	}{
+		{
+			name:       "when no filter is provided, node should be filtered",
+			nodeLabels: map[string]string{"k1": "v1"},
+			service: &corev1.Service{
+				ObjectMeta: v1.ObjectMeta{},
+			},
+			annotationKey:  ServiceAnnotationLoadBalancerNodeSelector,
+			defaultSetting: make(map[string]string),
+			nodeFiltered:   true,
+		},
+		{
+			name:       "when all key-value filters match, node should be filtered",
+			nodeLabels: map[string]string{"k1": "v1", "k2": "v2"},
+			service: &corev1.Service{
+				ObjectMeta: v1.ObjectMeta{
+					Annotations: map[string]string{ServiceAnnotationLoadBalancerNodeSelector: "k1=v1,k2=v2"},
+				},
+			},
+			annotationKey:  ServiceAnnotationLoadBalancerNodeSelector,
+			defaultSetting: make(map[string]string),
+			nodeFiltered:   true,
+		},
+		{
+			name:       "when all key-value filters match and a key value contains equals sign, node should be filtered",
+			nodeLabels: map[string]string{"k1": "v1", "k2": "v2=true"},
+			service: &corev1.Service{
+				ObjectMeta: v1.ObjectMeta{
+					Annotations: map[string]string{ServiceAnnotationLoadBalancerNodeSelector: "k1=v1,k2=v2=true"},
+				},
+			},
+			annotationKey:  ServiceAnnotationLoadBalancerNodeSelector,
+			defaultSetting: make(map[string]string),
+			nodeFiltered:   true,
+		},
+		{
+			name:       "when all just-key filters match, node should be filtered",
+			nodeLabels: map[string]string{"k1": "v1", "k2": "v2"},
+			service: &corev1.Service{
+				ObjectMeta: v1.ObjectMeta{
+					Annotations: map[string]string{ServiceAnnotationLoadBalancerNodeSelector: "k1,k2"},
+				},
+			},
+			annotationKey:  ServiceAnnotationLoadBalancerNodeSelector,
+			defaultSetting: make(map[string]string),
+			nodeFiltered:   true,
+		},
+		{
+			name:       "when some filters do not match, node should not be filtered",
+			nodeLabels: map[string]string{"k1": "v1"},
+			service: &corev1.Service{
+				ObjectMeta: v1.ObjectMeta{
+					Annotations: map[string]string{ServiceAnnotationLoadBalancerNodeSelector: " k1=v1, k2 "},
+				},
+			},
+			annotationKey:  ServiceAnnotationLoadBalancerNodeSelector,
+			defaultSetting: make(map[string]string),
+			nodeFiltered:   false,
+		},
+		{
+			name:       "when no filter matches, node should not be filtered",
+			nodeLabels: map[string]string{"k1": "v1", "k2": "v2"},
+			service: &corev1.Service{
+				ObjectMeta: v1.ObjectMeta{
+					Annotations: map[string]string{ServiceAnnotationLoadBalancerNodeSelector: "k3=v3"},
+				},
+			},
+			annotationKey:  ServiceAnnotationLoadBalancerNodeSelector,
+			defaultSetting: make(map[string]string),
+			nodeFiltered:   false,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			node := &corev1.Node{}
+			node.Labels = test.nodeLabels
+
+			// TODO: add testArgs
+			targetNodeLabels := getKeyValueFromServiceAnnotation(test.service, ServiceAnnotationLoadBalancerNodeSelector, "")
+
+			nodes := []*corev1.Node{node}
+			filteredNodes := filterNodes(nodes, targetNodeLabels)
+
+			if test.nodeFiltered {
+				assert.Equal(t, nodes, filteredNodes)
+			} else {
+				assert.Empty(t, filteredNodes)
+			}
+		})
+	}
+}
diff --git a/pkg/openstack/openstack.go b/pkg/openstack/openstack.go
index aee84846cd..1cc97ca480 100644
--- a/pkg/openstack/openstack.go
+++ b/pkg/openstack/openstack.go
@@ -114,7 +114,8 @@ type LoadBalancerOpts struct {
 	MonitorMaxRetries     uint `gcfg:"monitor-max-retries"`
 	MonitorMaxRetriesDown uint `gcfg:"monitor-max-retries-down"`
 	ManageSecurityGroups  bool `gcfg:"manage-security-groups"`
-	InternalLB            bool `gcfg:"internal-lb"` // default false
+	InternalLB            bool   `gcfg:"internal-lb"`   // default false
+	NodeSelector          string `gcfg:"node-selector"` // If specified, the loadbalancer members will be assigned only from the list of nodes filtered by the node-selector labels
 	CascadeDelete         bool `gcfg:"cascade-delete"`
 	FlavorID              string `gcfg:"flavor-id"`
 	AvailabilityZone      string `gcfg:"availability-zone"`
@@ -222,6 +223,7 @@ func ReadConfig(config io.Reader) (Config, error) {
 	// Set default values explicitly
 	cfg.LoadBalancer.Enabled = true
 	cfg.LoadBalancer.InternalLB = false
+	cfg.LoadBalancer.NodeSelector = ""
 	cfg.LoadBalancer.LBProvider = "amphora"
 	cfg.LoadBalancer.LBMethod = "ROUND_ROBIN"
 	cfg.LoadBalancer.CreateMonitor = false
diff --git a/pkg/util/util.go b/pkg/util/util.go
index 40e05441cb..fdd76dcb5b 100644
--- a/pkg/util/util.go
+++ b/pkg/util/util.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"strings"
 	"time"
 
 	"github.com/container-storage-interface/spec/lib/go/csi"
@@ -77,6 +78,31 @@ func Contains(list []string, strToSearch string) bool {
 	return false
 }
 
+// StringToMap converts a string of comma-separated key-values into a map
+func StringToMap(str string) map[string]string {
+	// break up a "key1=val,key2=val2,key3=,key4" string into a list
+	values := strings.Split(strings.TrimSpace(str), ",")
+	keyValues := make(map[string]string, len(values))
+
+	for _, kv := range values {
+		kv := strings.SplitN(strings.TrimSpace(kv), "=", 2)
+
+		k := kv[0]
+		if len(kv) == 1 {
+			if k != "" {
+				// process a bare "key" with no "=" sign
+				keyValues[k] = ""
+			}
+			continue
+		}
+
+		// process "key=val", "key=" or "key=val=foo"
+		keyValues[k] = kv[1]
+	}
+
+	return keyValues
+}
+
 // RoundUpSize calculates how many allocation units are needed to accommodate
 // a volume of given size. E.g. when user wants 1500MiB volume, while AWS EBS
 // allocates volumes in gibibyte-sized chunks,
diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go
new file mode 100644
index 0000000000..4a6cb7f615
--- /dev/null
+++ b/pkg/util/util_test.go
@@ -0,0 +1,49 @@
+package util
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestStringToMap(t *testing.T) {
+	tests := []struct {
+		name string
+		in   string
+		out  map[string]string
+	}{
+		{
+			name: "test1",
+			in:   "k1=v1,k2=v2",
+			out:  map[string]string{"k1": "v1", "k2": "v2"},
+		},
+		{
+			name: "test2",
+			in:   "k1=v1,k2=v2=true",
+			out:  map[string]string{"k1": "v1", "k2": "v2=true"},
+		},
+		{
+			name: "test3",
+			in:   "k1,k2",
+			out:  map[string]string{"k1": "", "k2": ""},
+		},
+		{
+			name: "test4",
+			in:   " k1=v1, k2 ",
+			out:  map[string]string{"k1": "v1", "k2": ""},
+		},
+		{
+			name: "test5",
+			in:   "k3=v3,=emptykey",
+			out:  map[string]string{"k3": "v3", "": "emptykey"},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			out := StringToMap(test.in)
+
+			assert.Equal(t, test.out, out)
+		})
+	}
+}
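Taken together, the new helpers implement a parse-then-filter pipeline: the selector string becomes a map, and only nodes whose labels satisfy every entry stay in the member list. Below is a hedged, self-contained sketch of that flow; `selectorToMap` and `keepMatching` are illustrative stand-ins mirroring `util.StringToMap` and the unexported `filterNodes`/`matchNodeLabels` above, not exported APIs of this repository:

```go
package main

import (
	"fmt"
	"strings"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// selectorToMap mirrors util.StringToMap for this sketch: split on commas,
// trim whitespace, and treat a bare key as an existence-only filter.
func selectorToMap(s string) map[string]string {
	out := map[string]string{}
	for _, kv := range strings.Split(strings.TrimSpace(s), ",") {
		parts := strings.SplitN(strings.TrimSpace(kv), "=", 2)
		if len(parts) == 2 {
			out[parts[0]] = parts[1]
		} else if parts[0] != "" {
			out[parts[0]] = ""
		}
	}
	return out
}

// keepMatching mirrors the unexported filterNodes/matchNodeLabels pair:
// an empty selector keeps every node, otherwise a node survives only if
// all selector entries are satisfied by its labels.
func keepMatching(nodes []*corev1.Node, sel map[string]string) []*corev1.Node {
	if len(sel) == 0 {
		return nodes
	}
	kept := make([]*corev1.Node, 0, len(nodes))
	for _, n := range nodes {
		ok := len(n.Labels) > 0
		for k, v := range sel {
			if got, found := n.Labels[k]; !found || (v != "" && got != v) {
				ok = false
				break
			}
		}
		if ok {
			kept = append(kept, n)
		}
	}
	return kept
}

func main() {
	nodes := []*corev1.Node{
		{ObjectMeta: metav1.ObjectMeta{Name: "prod-node", Labels: map[string]string{"env": "production", "region": "default"}}},
		{ObjectMeta: metav1.ObjectMeta{Name: "stage-node", Labels: map[string]string{"env": "staging"}}},
	}
	sel := selectorToMap("env=production, region=default")
	for _, n := range keepMatching(nodes, sel) {
		fmt.Println(n.Name) // only "prod-node" survives the filter
	}
}
```

One design point worth noting from the patch itself: filtering happens once per reconcile (in ensure and update paths) before members, pools, and security groups are computed, so all three always agree on the same filtered node set.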