From a5ffd1d65ab6dcffd5b4869fbea7a1dfb0b3d2fc Mon Sep 17 00:00:00 2001 From: killianmuldoon Date: Fri, 29 Sep 2023 13:45:12 +0100 Subject: [PATCH] Add clusterctl upgrade test Signed-off-by: killianmuldoon --- .gitignore | 6 +- Makefile | 48 +- .../flavors/clusterclass_generators.go | 9 +- packaging/flavorgen/flavors/patches.go | 55 +- templates/clusterclass-template.yaml | 34 +- test/e2e/clusterctl_upgrade_test.go | 70 ++ test/e2e/config/vsphere-ci.yaml | 88 +- test/e2e/config/vsphere-dev.yaml | 92 +- .../main/clusterclass/kustomization.yaml | 11 + .../clusterclass/patch-prekubeadmscript.yaml | 51 + .../clusterclass/patch-vsphere-template.yaml | 37 + .../cluster-resource-set-csi-insecure.yaml | 4 +- .../remote-management/image-injection.yaml | 43 +- .../main/remote-management/kustomization.yaml | 8 +- .../v1.7/bases/cluster-network-CIDR.yaml | 10 + .../cluster-resource-set-csi-insecure.yaml | 28 + .../bases/cluster-resource-set-label.yaml | 7 + .../v1.7/bases/cluster-resource-set.yaml | 18 + .../v1.7/bases/cluster-template-topology.yaml | 835 +++++++++++++++ .../v1.7/bases/cluster.yaml | 996 +++++++++++++++++ .../v1.7/bases/remove-storage-policy.yaml | 2 + .../clusterclass-quick-start.yaml | 297 ++++++ .../v1.7/clusterclass/kustomization.yaml | 13 + .../clusterclass/patch-prekubeadmscript.yaml | 51 + .../clusterclass/patch-vsphere-template.yaml | 37 + .../v1.7/topology/kustomization.yaml | 9 + .../v1.7/workload/kustomization.yaml | 8 + .../workload-control-plane-endpoint-ip.yaml | 5 + .../v1.8/bases/cluster-network-CIDR.yaml | 10 + .../cluster-resource-set-csi-insecure.yaml | 28 + .../bases/cluster-resource-set-label.yaml | 7 + .../v1.8/bases/cluster-resource-set.yaml | 18 + .../v1.8/bases/cluster-template-topology.yaml | 835 +++++++++++++++ .../v1.8/bases/cluster.yaml | 998 ++++++++++++++++++ .../v1.8/bases/remove-storage-policy.yaml | 2 + .../v1.8/cluster-template/kustomization.yaml | 13 + .../clusterclass-quick-start.yaml | 299 ++++++ 
.../v1.8/clusterclass/kustomization.yaml | 13 + .../clusterclass/patch-prekubeadmscript.yaml | 51 + .../clusterclass/patch-vsphere-template.yaml | 37 + .../v1.8/topology/kustomization.yaml | 13 + .../v1.8/workload/kustomization.yaml | 8 + .../workload-control-plane-endpoint-ip.yaml | 5 + .../data/shared/v1.7/v1beta1/metadata.yaml | 32 + .../v1.7/v1beta1_provider/metadata.yaml | 20 + .../data/shared/v1.8/v1beta1/metadata.yaml | 35 + .../v1.8/v1beta1_provider/metadata.yaml | 23 + 47 files changed, 5232 insertions(+), 87 deletions(-) create mode 100644 test/e2e/clusterctl_upgrade_test.go create mode 100644 test/e2e/data/infrastructure-vsphere/main/clusterclass/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/main/clusterclass/patch-prekubeadmscript.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/main/clusterclass/patch-vsphere-template.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster-network-CIDR.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster-resource-set-csi-insecure.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster-resource-set-label.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster-resource-set.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster-template-topology.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.7/bases/remove-storage-policy.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/clusterclass-quick-start.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/patch-prekubeadmscript.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/patch-vsphere-template.yaml create mode 100644 
test/e2e/data/infrastructure-vsphere/v1.7/topology/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.7/workload/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.7/workload/workload-control-plane-endpoint-ip.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster-network-CIDR.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster-resource-set-csi-insecure.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster-resource-set-label.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster-resource-set.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster-template-topology.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.8/bases/remove-storage-policy.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.8/cluster-template/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.8/clusterclass/clusterclass-quick-start.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.8/clusterclass/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.8/clusterclass/patch-prekubeadmscript.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.8/clusterclass/patch-vsphere-template.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.8/topology/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.8/workload/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-vsphere/v1.8/workload/workload-control-plane-endpoint-ip.yaml create mode 100644 test/e2e/data/shared/v1.7/v1beta1/metadata.yaml create mode 100644 test/e2e/data/shared/v1.7/v1beta1_provider/metadata.yaml create mode 100644 test/e2e/data/shared/v1.8/v1beta1/metadata.yaml create mode 100644 
test/e2e/data/shared/v1.8/v1beta1_provider/metadata.yaml diff --git a/.gitignore b/.gitignore index 6b9dab90af..134102d9b1 100644 --- a/.gitignore +++ b/.gitignore @@ -16,7 +16,8 @@ _artifacts/ # E2E test templates test/e2e/data/infrastructure-vsphere/**/cluster-template*.yaml -test/e2e/data/infrastructure-vsphere/main/clusterclass-quick-start.yaml +test/e2e/data/infrastructure-vsphere/main/**/clusterclass-quick-start.yaml +test/e2e/data/infrastructure-vsphere/*/clusterclass-quick-start.yaml # env vars file used in getting-started.md and manifests generation envvars.txt @@ -75,6 +76,7 @@ _releasenotes *~ *.tmp .DS_Store +*.swp # Ginkgo logs from test runs -*ginkgo-log.txt \ No newline at end of file +*ginkgo-log.txt diff --git a/Makefile b/Makefile index 5041dfe7be..495fa190da 100644 --- a/Makefile +++ b/Makefile @@ -80,7 +80,7 @@ GINKGO_SKIP ?= GINKGO_TIMEOUT ?= 2h E2E_CONF_FILE ?= "$(abspath test/e2e/config/vsphere-dev.yaml)" INTEGRATION_CONF_FILE ?= "$(abspath test/integration/integration-dev.yaml)" -E2E_TEMPLATE_DIR := "$(abspath test/e2e/data/infrastructure-vsphere/)" +E2E_TEMPLATE_DIR := $(abspath test/e2e/data/infrastructure-vsphere/) SKIP_RESOURCE_CLEANUP ?= false USE_EXISTING_CLUSTER ?= false GINKGO_NOCOLOR ?= false @@ -286,32 +286,42 @@ generate-doctoc: TRACE=$(TRACE) ./hack/generate-doctoc.sh .PHONY: generate-e2e-templates -generate-e2e-templates: $(KUSTOMIZE) $(addprefix generate-e2e-templates-, main) ## Generate test templates for all branches +generate-e2e-templates: $(KUSTOMIZE) $(addprefix generate-e2e-templates-, v1.7 v1.8 main) ## Generate test templates for all branches .PHONY: generate-e2e-templates-main generate-e2e-templates-main: $(KUSTOMIZE) ## Generate test templates for the main branch $(MAKE) e2e-flavors-main - cp $(RELEASE_DIR)/main/cluster-template.yaml $(E2E_TEMPLATE_DIR)/main/base/cluster-template.yaml - "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build $(E2E_TEMPLATE_DIR)/main/base > 
$(E2E_TEMPLATE_DIR)/main/cluster-template.yaml - "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build $(E2E_TEMPLATE_DIR)/main/hw-upgrade > $(E2E_TEMPLATE_DIR)/main/cluster-template-hw-upgrade.yaml - "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build $(E2E_TEMPLATE_DIR)/main/storage-policy > $(E2E_TEMPLATE_DIR)/main/cluster-template-storage-policy.yaml - "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build $(E2E_TEMPLATE_DIR)/main/remote-management > $(E2E_TEMPLATE_DIR)/main/cluster-template-remote-management.yaml - "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build $(E2E_TEMPLATE_DIR)/main/conformance > $(E2E_TEMPLATE_DIR)/main/cluster-template-conformance.yaml + cp "$(RELEASE_DIR)/main/cluster-template.yaml" "$(E2E_TEMPLATE_DIR)/main/base/cluster-template.yaml" + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/main/base" > "$(E2E_TEMPLATE_DIR)/main/cluster-template.yaml" + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/main/hw-upgrade" > "$(E2E_TEMPLATE_DIR)/main/cluster-template-hw-upgrade.yaml" + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/main/storage-policy" > "$(E2E_TEMPLATE_DIR)/main/cluster-template-storage-policy.yaml" + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/main/conformance" > "$(E2E_TEMPLATE_DIR)/main/cluster-template-conformance.yaml" # Since CAPI uses different flavor names for KCP and MD remediation using MHC - "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build $(E2E_TEMPLATE_DIR)/main/mhc-remediation/kcp > $(E2E_TEMPLATE_DIR)/main/cluster-template-kcp-remediation.yaml - "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build $(E2E_TEMPLATE_DIR)/main/mhc-remediation/md > $(E2E_TEMPLATE_DIR)/main/cluster-template-md-remediation.yaml - "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build $(E2E_TEMPLATE_DIR)/main/node-drain > 
$(E2E_TEMPLATE_DIR)/main/cluster-template-node-drain.yaml - "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build $(E2E_TEMPLATE_DIR)/main/ignition > $(E2E_TEMPLATE_DIR)/main/cluster-template-ignition.yaml + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/main/mhc-remediation/kcp" > "$(E2E_TEMPLATE_DIR)/main/cluster-template-kcp-remediation.yaml" + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/main/mhc-remediation/md" > "$(E2E_TEMPLATE_DIR)/main/cluster-template-md-remediation.yaml" + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/main/node-drain" > "$(E2E_TEMPLATE_DIR)/main/cluster-template-node-drain.yaml" + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/main/ignition" > "$(E2E_TEMPLATE_DIR)/main/cluster-template-ignition.yaml" # generate clusterclass and cluster topology - cp $(RELEASE_DIR)/main/cluster-template-topology.yaml $(E2E_TEMPLATE_DIR)/main/topology/cluster-template-topology.yaml - cp $(RELEASE_DIR)/main/clusterclass-template.yaml $(E2E_TEMPLATE_DIR)/main/clusterclass-quick-start.yaml - "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build $(E2E_TEMPLATE_DIR)/main/topology > $(E2E_TEMPLATE_DIR)/main/cluster-template-topology.yaml + cp "$(RELEASE_DIR)/main/clusterclass-template.yaml" "$(E2E_TEMPLATE_DIR)/main/clusterclass/clusterclass-quick-start.yaml" + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/main/clusterclass" > "$(E2E_TEMPLATE_DIR)/main/clusterclass-quick-start.yaml" + cp "$(RELEASE_DIR)/main/cluster-template-topology.yaml" "$(E2E_TEMPLATE_DIR)/main/topology/cluster-template-topology.yaml" + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/main/topology" > "$(E2E_TEMPLATE_DIR)/main/cluster-template-topology.yaml" + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/main/remote-management" > 
"$(E2E_TEMPLATE_DIR)/main/cluster-template-remote-management.yaml" # for PCI passthrough template - "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build $(E2E_TEMPLATE_DIR)/main/pci > $(E2E_TEMPLATE_DIR)/main/cluster-template-pci.yaml + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/main/pci" > "$(E2E_TEMPLATE_DIR)/main/cluster-template-pci.yaml" # for DHCP overrides - "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build $(E2E_TEMPLATE_DIR)/main/dhcp-overrides > $(E2E_TEMPLATE_DIR)/main/cluster-template-dhcp-overrides.yaml - "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build $(E2E_TEMPLATE_DIR)/main/ownerreferences > $(E2E_TEMPLATE_DIR)/main/cluster-template-ownerreferences.yaml - + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/main/dhcp-overrides" > "$(E2E_TEMPLATE_DIR)/main/cluster-template-dhcp-overrides.yaml" + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/main/ownerreferences" > "$(E2E_TEMPLATE_DIR)/main/cluster-template-ownerreferences.yaml" + +.PHONY: generate-e2e-templates-v1.8 +generate-e2e-templates-v1.8: $(KUSTOMIZE) + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/v1.8/clusterclass" > "$(E2E_TEMPLATE_DIR)/v1.8/clusterclass-quick-start.yaml" + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/v1.8/workload" > "$(E2E_TEMPLATE_DIR)/v1.8/cluster-template-workload.yaml" + +.PHONY: generate-e2e-templates-v1.7 +generate-e2e-templates-v1.7: $(KUSTOMIZE) + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/v1.7/clusterclass" > "$(E2E_TEMPLATE_DIR)/v1.7/clusterclass-quick-start.yaml" + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_TEMPLATE_DIR)/v1.7/workload" > "$(E2E_TEMPLATE_DIR)/v1.7/cluster-template-workload.yaml" ## -------------------------------------- ## Lint / Verify diff --git 
a/packaging/flavorgen/flavors/clusterclass_generators.go b/packaging/flavorgen/flavors/clusterclass_generators.go index 44dde533dd..4c8d79cc47 100644 --- a/packaging/flavorgen/flavors/clusterclass_generators.go +++ b/packaging/flavorgen/flavors/clusterclass_generators.go @@ -108,6 +108,7 @@ func getWorkersClass() clusterv1.WorkersClass { func getClusterClassPatches() []clusterv1.ClusterClassPatch { return []clusterv1.ClusterClassPatch{ + createFilesArrayPatch(), enableSSHPatch(), infraClusterPatch(), kubeVipEnabledPatch(), @@ -223,12 +224,6 @@ func newVSphereClusterTemplate() infrav1.VSphereClusterTemplate { } func newKubeadmControlPlaneTemplate(templateName string) controlplanev1.KubeadmControlPlaneTemplate { - files := []bootstrapv1.File{ - { - Owner: "root:root", - Path: "/etc/kubernetes/manifests/kube-vip.yaml", - }, - } return controlplanev1.KubeadmControlPlaneTemplate{ TypeMeta: metav1.TypeMeta{ Kind: util.TypeToKind(&controlplanev1.KubeadmControlPlaneTemplate{}), @@ -241,7 +236,7 @@ func newKubeadmControlPlaneTemplate(templateName string) controlplanev1.KubeadmC Spec: controlplanev1.KubeadmControlPlaneTemplateSpec{ Template: controlplanev1.KubeadmControlPlaneTemplateResource{ Spec: controlplanev1.KubeadmControlPlaneTemplateResourceSpec{ - KubeadmConfigSpec: defaultKubeadmInitSpec(files), + KubeadmConfigSpec: defaultKubeadmInitSpec([]bootstrapv1.File{}), }, }, }, diff --git a/packaging/flavorgen/flavors/patches.go b/packaging/flavorgen/flavors/patches.go index 873dcdd606..16695a1238 100644 --- a/packaging/flavorgen/flavors/patches.go +++ b/packaging/flavorgen/flavors/patches.go @@ -19,6 +19,7 @@ package flavors import ( "fmt" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" @@ -29,6 +30,52 @@ import ( "sigs.k8s.io/cluster-api-provider-vsphere/packaging/flavorgen/flavors/util" ) +func 
createFilesArrayPatch() clusterv1.ClusterClassPatch { + return clusterv1.ClusterClassPatch{ + Name: "createFilesArray", + Definitions: []clusterv1.PatchDefinition{ + { + Selector: clusterv1.PatchSelector{ + APIVersion: controlplanev1.GroupVersion.String(), + Kind: util.TypeToKind(&controlplanev1.KubeadmControlPlaneTemplate{}), + MatchResources: clusterv1.PatchSelectorMatch{ + ControlPlane: true, + }, + }, + JSONPatches: []clusterv1.JSONPatch{ + { + Op: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/files", + Value: &apiextensionsv1.JSON{ + Raw: []byte("[]"), + }, + }, + }, + }, + { + Selector: clusterv1.PatchSelector{ + APIVersion: bootstrapv1.GroupVersion.String(), + Kind: util.TypeToKind(&bootstrapv1.KubeadmConfigTemplate{}), + MatchResources: clusterv1.PatchSelectorMatch{ + MachineDeploymentClass: &clusterv1.PatchSelectorMatchMachineDeploymentClass{ + Names: []string{fmt.Sprintf("%s-worker", env.ClusterClassNameVar)}, + }, + }, + }, + JSONPatches: []clusterv1.JSONPatch{ + { + Op: "add", + Path: "/spec/template/spec/files", + Value: &apiextensionsv1.JSON{ + Raw: []byte("[]"), + }, + }, + }, + }, + }, + } +} + func enableSSHPatch() clusterv1.ClusterClassPatch { return clusterv1.ClusterClassPatch{ Name: "enableSSHIntoNodes", @@ -126,7 +173,7 @@ func infraClusterPatch() clusterv1.ClusterClassPatch { func kubeVipEnabledPatch() clusterv1.ClusterClassPatch { return clusterv1.ClusterClassPatch{ - Name: "kubeVipEnabled", + Name: "kubeVipPodManifest", Definitions: []clusterv1.PatchDefinition{ { Selector: clusterv1.PatchSelector{ @@ -139,9 +186,11 @@ func kubeVipEnabledPatch() clusterv1.ClusterClassPatch { JSONPatches: []clusterv1.JSONPatch{ { Op: "add", - Path: "/spec/template/spec/kubeadmConfigSpec/files/0/content", + Path: "/spec/template/spec/kubeadmConfigSpec/files/-", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("kubeVipPodManifest"), + Template: pointer.String(`owner: root:root +path: "/etc/kubernetes/manifests/kube-vip.yaml" +content: {{ 
printf "%q" (regexReplaceAll "(name: address\n +value:).*" .kubeVipPodManifest (printf "$1 %s" .controlPlaneIpAddr)) }}`), }, }, }, diff --git a/templates/clusterclass-template.yaml b/templates/clusterclass-template.yaml index 15b5cca25c..b1413f9013 100644 --- a/templates/clusterclass-template.yaml +++ b/templates/clusterclass-template.yaml @@ -32,6 +32,28 @@ spec: name: '${CLUSTER_CLASS_NAME}' namespace: '${NAMESPACE}' patches: + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files + value: [] + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: add + path: /spec/template/spec/files + value: [] + selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + name: createFilesArray - definitions: - jsonPatches: - op: add @@ -96,15 +118,18 @@ spec: - definitions: - jsonPatches: - op: add - path: /spec/template/spec/kubeadmConfigSpec/files/0/content + path: /spec/template/spec/kubeadmConfigSpec/files/- valueFrom: - variable: kubeVipPodManifest + template: |- + owner: root:root + path: "/etc/kubernetes/manifests/kube-vip.yaml" + content: {{ printf "%q" (regexReplaceAll "(name: address\n +value:).*" .kubeVipPodManifest (printf "$1 %s" .controlPlaneIpAddr)) }} selector: apiVersion: controlplane.cluster.x-k8s.io/v1beta1 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true - name: kubeVipEnabled + name: kubeVipPodManifest variables: - name: sshKey required: false @@ -228,9 +253,6 @@ spec: controllerManager: extraArgs: cloud-provider: external - files: - - owner: root:root - path: /etc/kubernetes/manifests/kube-vip.yaml initConfiguration: nodeRegistration: criSocket: /var/run/containerd/containerd.sock diff --git a/test/e2e/clusterctl_upgrade_test.go b/test/e2e/clusterctl_upgrade_test.go new 
file mode 100644 index 0000000000..1e588ae866 --- /dev/null +++ b/test/e2e/clusterctl_upgrade_test.go @@ -0,0 +1,70 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + capi_e2e "sigs.k8s.io/cluster-api/test/e2e" +) + +var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.8=>current, CAPI 1.5=>1.6) [ClusterClass]", func() { + capi_e2e.ClusterctlUpgradeSpec(context.TODO(), func() capi_e2e.ClusterctlUpgradeSpecInput { + return capi_e2e.ClusterctlUpgradeSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InitWithProvidersContract: "v1beta1", + MgmtFlavor: "remote-management", + InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.4/clusterctl-{OS}-{ARCH}", + InitWithCoreProvider: "cluster-api:v1.5.4", + InitWithBootstrapProviders: []string{"kubeadm:v1.5.4"}, + InitWithControlPlaneProviders: []string{"kubeadm:v1.5.4"}, + InitWithInfrastructureProviders: []string{"vsphere:v1.8.4"}, + InitWithRuntimeExtensionProviders: []string{}, + InitWithKubernetesVersion: "v1.28.0", + WorkloadKubernetesVersion: "v1.28.0", + WorkloadFlavor: "workload", + } + }) +}) + +var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.7=>current, CAPI 1.4=>1.6) [ClusterClass]", func() 
{ + capi_e2e.ClusterctlUpgradeSpec(context.TODO(), func() capi_e2e.ClusterctlUpgradeSpecInput { + return capi_e2e.ClusterctlUpgradeSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InitWithProvidersContract: "v1beta1", + MgmtFlavor: "remote-management", + InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/clusterctl-{OS}-{ARCH}", + InitWithCoreProvider: "cluster-api:v1.4.9", + InitWithBootstrapProviders: []string{"kubeadm:v1.4.9"}, + InitWithControlPlaneProviders: []string{"kubeadm:v1.4.9"}, + InitWithInfrastructureProviders: []string{"vsphere:v1.7.4"}, + InitWithRuntimeExtensionProviders: []string{}, + InitWithKubernetesVersion: "v1.27.3", + WorkloadKubernetesVersion: "v1.27.3", + WorkloadFlavor: "workload", + } + }) +}) diff --git a/test/e2e/config/vsphere-ci.yaml b/test/e2e/config/vsphere-ci.yaml index 60801ee32b..4d6a37b303 100644 --- a/test/e2e/config/vsphere-ci.yaml +++ b/test/e2e/config/vsphere-ci.yaml @@ -38,6 +38,26 @@ providers: replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" + - name: v1.5.4 + # Use manifest from source files + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.4/core-components.yaml" + type: "url" + contract: v1beta1 + files: + - sourcePath: "../data/shared/v1.8/v1beta1/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + - name: v1.4.9 + # Use manifest from source files + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/core-components.yaml" + type: "url" + contract: v1beta1 + files: + - sourcePath: "../data/shared/v1.7/v1beta1/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" - name: kubeadm type: BootstrapProvider @@ -52,6 +72,26 @@ providers: replacements: 
- old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" + - name: v1.5.4 + # Use manifest from source files + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.4/bootstrap-components.yaml" + type: "url" + contract: v1beta1 + files: + - sourcePath: "../data/shared/v1.8/v1beta1/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + - name: v1.4.9 + # Use manifest from source files + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/bootstrap-components.yaml" + type: "url" + contract: v1beta1 + files: + - sourcePath: "../data/shared/v1.7/v1beta1/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" - name: kubeadm type: ControlPlaneProvider @@ -66,6 +106,26 @@ providers: replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" + - name: v1.5.4 + # Use manifest from source files + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.4/control-plane-components.yaml" + type: "url" + contract: v1beta1 + files: + - sourcePath: "../data/shared/v1.8/v1beta1/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + - name: v1.4.9 + # Use manifest from source files + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/control-plane-components.yaml" + type: "url" + contract: v1beta1 + files: + - sourcePath: "../data/shared/v1.7/v1beta1/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" - name: vsphere type: InfrastructureProvider @@ -81,21 +141,41 @@ providers: new: "imagePullPolicy: IfNotPresent" files: # Add a cluster template - - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-conformance.yaml" + - 
sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-dhcp-overrides.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-hw-upgrade.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-ignition.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-kcp-remediation.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-md-remediation.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-node-drain.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-ownerreferences.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-pci.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-remote-management.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-storage-policy.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-topology.yaml" - - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-dhcp-overrides.yaml" - - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-ownerreferences.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/clusterclass-quick-start.yaml" - - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-ignition.yaml" - sourcePath: "../data/shared/main/v1beta1_provider/metadata.yaml" + - name: v1.8.4 + # Use manifest from source files + value: "https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/v1.8.4/infrastructure-components.yaml" + type: "url" + contract: v1beta1 + files: + # Add a cluster template + - sourcePath: 
"../../../test/e2e/data/infrastructure-vsphere/v1.8/cluster-template-workload.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.8/clusterclass-quick-start.yaml" + - sourcePath: "../data/shared/v1.8/v1beta1_provider/metadata.yaml" + - name: v1.7.4 + # Use manifest from source files + value: "https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/v1.7.4/infrastructure-components.yaml" + type: "url" + contract: v1beta1 + files: + # Add a cluster template + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.7/cluster-template-workload.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.7/clusterclass-quick-start.yaml" + - sourcePath: "../data/shared/v1.7/v1beta1_provider/metadata.yaml" variables: KUBERNETES_VERSION: "v1.28.0" diff --git a/test/e2e/config/vsphere-dev.yaml b/test/e2e/config/vsphere-dev.yaml index 4d9b910690..dbc5caa752 100644 --- a/test/e2e/config/vsphere-dev.yaml +++ b/test/e2e/config/vsphere-dev.yaml @@ -41,6 +41,26 @@ providers: replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" + - name: v1.5.4 + # Use manifest from source files + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.4/core-components.yaml" + type: "url" + contract: v1beta1 + files: + - sourcePath: "../data/shared/v1.8/v1beta1/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + - name: v1.4.9 
+ # Use manifest from source files + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/core-components.yaml" + type: "url" + contract: v1beta1 + files: + - sourcePath: "../data/shared/v1.7/v1beta1/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" - name: kubeadm type: BootstrapProvider @@ -55,6 +75,26 @@ providers: replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" + - name: v1.5.4 + # Use manifest from source files + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.4/bootstrap-components.yaml" + type: "url" + contract: v1beta1 + files: + - sourcePath: "../data/shared/v1.8/v1beta1/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + - name: v1.4.9 + # Use manifest from source files + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/bootstrap-components.yaml" + type: "url" + contract: v1beta1 + files: + - sourcePath: "../data/shared/v1.7/v1beta1/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" - name: kubeadm type: ControlPlaneProvider @@ -69,6 +109,26 @@ providers: replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" + - name: v1.5.4 + # Use manifest from source files + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.4/control-plane-components.yaml" + type: "url" + contract: v1beta1 + files: + - sourcePath: "../data/shared/v1.8/v1beta1/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + - name: v1.4.9 
+ # Use manifest from source files + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/control-plane-components.yaml" + type: "url" + contract: v1beta1 + files: + - sourcePath: "../data/shared/v1.7/v1beta1/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" - name: vsphere type: InfrastructureProvider @@ -84,21 +144,41 @@ providers: new: "imagePullPolicy: IfNotPresent" files: # Add a cluster template - - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-conformance.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-dhcp-overrides.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-hw-upgrade.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-ignition.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-kcp-remediation.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-md-remediation.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-node-drain.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-ownerreferences.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-pci.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-remote-management.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-storage-policy.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-topology.yaml" - - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-dhcp-overrides.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template.yaml" 
- sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/clusterclass-quick-start.yaml" - - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-ignition.yaml" - - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-ownerreferences.yaml" - sourcePath: "../data/shared/main/v1beta1_provider/metadata.yaml" + - name: v1.8.4 + # Use manifest from source files + value: "https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/v1.8.4/infrastructure-components.yaml" + type: "url" + contract: v1beta1 + files: + # Add a cluster template + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.8/cluster-template-workload.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.8/clusterclass-quick-start.yaml" + - sourcePath: "../data/shared/v1.8/v1beta1_provider/metadata.yaml" + - name: v1.7.4 + # Use manifest from source files + value: "https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/v1.7.4/infrastructure-components.yaml" + type: "url" + contract: v1beta1 + files: + # Add a cluster template + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.7/cluster-template-workload.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/v1.7/clusterclass-quick-start.yaml" + - sourcePath: "../data/shared/v1.7/v1beta1_provider/metadata.yaml" variables: KUBERNETES_VERSION: "v1.28.0" @@ -155,8 +235,8 @@ variables: intervals: default/wait-controllers: ["5m", "10s"] default/wait-cluster: ["5m", "10s"] - default/wait-control-plane: ["20m", "10s"] - default/wait-worker-nodes: ["20m", "10s"] + default/wait-control-plane: ["10m", "10s"] + default/wait-worker-nodes: ["10m", "10s"] default/wait-delete-cluster: ["5m", "10s"] default/wait-machine-upgrade: ["15m", "1m"] default/wait-machine-remediation: ["15m", "10s"] diff --git a/test/e2e/data/infrastructure-vsphere/main/clusterclass/kustomization.yaml 
b/test/e2e/data/infrastructure-vsphere/main/clusterclass/kustomization.yaml new file mode 100644 index 0000000000..820776eeaa --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/main/clusterclass/kustomization.yaml @@ -0,0 +1,11 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- ./clusterclass-quick-start.yaml +patches: + - target: + kind: ClusterClass + path: ./patch-vsphere-template.yaml + - target: + kind: ClusterClass + path: ./patch-prekubeadmscript.yaml diff --git a/test/e2e/data/infrastructure-vsphere/main/clusterclass/patch-prekubeadmscript.yaml b/test/e2e/data/infrastructure-vsphere/main/clusterclass/patch-prekubeadmscript.yaml new file mode 100644 index 0000000000..3e6e63b28d --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/main/clusterclass/patch-prekubeadmscript.yaml @@ -0,0 +1,51 @@ +- op: add + path: /spec/patches/- + value: + definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/preKubeadmCommands/- + value: "/opt/prekubeadmscript.sh" + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files/- + valueFrom: + template: | + owner: root:root + path: "/opt/prekubeadmscript.sh" + permissions: "0755" + content: {{ printf "%q" .preKubeadmScript }} + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: add + path: /spec/template/spec/preKubeadmCommands/- + value: "/opt/prekubeadmscript.sh" + - op: add + path: /spec/template/spec/files/- + valueFrom: + template: | + owner: root:root + path: "/opt/prekubeadmscript.sh" + permissions: "0755" + content: {{ printf "%q" .preKubeadmScript }} + selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + enabledIf: '{{ if .preKubeadmScript }}true{{ end }}' + name: preKubeadmScript +- op: add + path: 
/spec/variables/- + value: + name: preKubeadmScript + required: false + schema: + openAPIV3Schema: + type: string + description: Script to run in preKubeadmCommands. diff --git a/test/e2e/data/infrastructure-vsphere/main/clusterclass/patch-vsphere-template.yaml b/test/e2e/data/infrastructure-vsphere/main/clusterclass/patch-vsphere-template.yaml new file mode 100644 index 0000000000..5f7f38db63 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/main/clusterclass/patch-vsphere-template.yaml @@ -0,0 +1,37 @@ +- op: add + path: /spec/patches/- + value: + definitions: + - jsonPatches: + - op: replace + path: /spec/template/spec/template + valueFrom: + template: |- + {{- if semverCompare ">= v1.28" .builtin.controlPlane.version -}} + ubuntu-2204-kube-{{ .builtin.controlPlane.version }} + {{- else -}} + ubuntu-2004-kube-{{ .builtin.controlPlane.version }} + {{- end -}} + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: replace + path: /spec/template/spec/template + valueFrom: + template: |- + {{- if semverCompare ">= v1.28" .builtin.machineDeployment.version -}} + ubuntu-2204-kube-{{ .builtin.machineDeployment.version }} + {{- else -}} + ubuntu-2004-kube-{{ .builtin.machineDeployment.version }} + {{- end -}} + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + name: vSphereTemplate diff --git a/test/e2e/data/infrastructure-vsphere/main/commons/cluster-resource-set-csi-insecure.yaml b/test/e2e/data/infrastructure-vsphere/main/commons/cluster-resource-set-csi-insecure.yaml index a32e96789b..6edd2a37a0 100644 --- a/test/e2e/data/infrastructure-vsphere/main/commons/cluster-resource-set-csi-insecure.yaml +++ b/test/e2e/data/infrastructure-vsphere/main/commons/cluster-resource-set-csi-insecure.yaml @@ -8,8 +8,8 @@ stringData: 
apiVersion: v1 kind: Secret metadata: - name: csi-vsphere-config - namespace: kube-system + name: vsphere-config-secret + namespace: vmware-system-csi stringData: csi-vsphere.conf: |+ [Global] diff --git a/test/e2e/data/infrastructure-vsphere/main/remote-management/image-injection.yaml b/test/e2e/data/infrastructure-vsphere/main/remote-management/image-injection.yaml index 1e5205a590..0cbda95f31 100644 --- a/test/e2e/data/infrastructure-vsphere/main/remote-management/image-injection.yaml +++ b/test/e2e/data/infrastructure-vsphere/main/remote-management/image-injection.yaml @@ -1,35 +1,8 @@ ---- -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 -kind: KubeadmControlPlane -metadata: - name: '${CLUSTER_NAME}' - namespace: '${NAMESPACE}' -spec: - kubeadmConfigSpec: - preKubeadmCommands: - - hostname "{{ ds.meta_data.hostname }}" - - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts - - echo "127.0.0.1 localhost" >>/etc/hosts - - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts - - echo "{{ ds.meta_data.hostname }}" >/etc/hostname - - mkdir -p /opt/cluster-api - - curl "https://storage.googleapis.com/capv-ci/${E2E_IMAGE_SHA}" -o /opt/cluster-api/image.tar - - ctr -n k8s.io images import /opt/cluster-api/image.tar ---- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 -kind: KubeadmConfigTemplate -metadata: - name: '${CLUSTER_NAME}-md-0' - namespace: '${NAMESPACE}' -spec: - template: - spec: - preKubeadmCommands: - - hostname "{{ ds.meta_data.hostname }}" - - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts - - echo "127.0.0.1 localhost" >>/etc/hosts - - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts - - echo "{{ ds.meta_data.hostname }}" >/etc/hostname - - mkdir -p /opt/cluster-api - - curl "https://storage.googleapis.com/capv-ci/${E2E_IMAGE_SHA}" -o /opt/cluster-api/image.tar - - ctr -n k8s.io images import /opt/cluster-api/image.tar +- op: add + path: /spec/topology/variables/- + value: + name: preKubeadmScript + value: | + mkdir -p 
/opt/cluster-api + curl "https://storage.googleapis.com/capv-ci/${E2E_IMAGE_SHA}" -o /opt/cluster-api/image.tar + ctr -n k8s.io images import /opt/cluster-api/image.tar diff --git a/test/e2e/data/infrastructure-vsphere/main/remote-management/kustomization.yaml b/test/e2e/data/infrastructure-vsphere/main/remote-management/kustomization.yaml index f9e14de80e..a3096d07f7 100644 --- a/test/e2e/data/infrastructure-vsphere/main/remote-management/kustomization.yaml +++ b/test/e2e/data/infrastructure-vsphere/main/remote-management/kustomization.yaml @@ -1,6 +1,8 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - - ../base -patchesStrategicMerge: - - image-injection.yaml + - ../topology +patches: + - target: + kind: Cluster + path: ./image-injection.yaml diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster-network-CIDR.yaml b/test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster-network-CIDR.yaml new file mode 100644 index 0000000000..24d0253cef --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster-network-CIDR.yaml @@ -0,0 +1,10 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.30.0/24 diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster-resource-set-csi-insecure.yaml b/test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster-resource-set-csi-insecure.yaml new file mode 100644 index 0000000000..86c659694a --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster-resource-set-csi-insecure.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Secret +metadata: + name: csi-vsphere-config + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: csi-vsphere-config + namespace: kube-system + stringData: + csi-vsphere.conf: |+ + [Global] + cluster-id = "${NAMESPACE}/${CLUSTER_NAME}" + + [VirtualCenter 
"${VSPHERE_SERVER}"] + insecure-flag = "${VSPHERE_INSECURE_CSI}" + user = "${VSPHERE_USERNAME}" + password = "${VSPHERE_PASSWORD}" + datacenters = "${VSPHERE_DATACENTER}" + + [Network] + public-network = "${VSPHERE_NETWORK}" + + type: Opaque +type: addons.cluster.x-k8s.io/resource-set diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster-resource-set-label.yaml b/test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster-resource-set-label.yaml new file mode 100644 index 0000000000..1447050b04 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster-resource-set-label.yaml @@ -0,0 +1,7 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' + labels: + cni: "${CLUSTER_NAME}-crs-cni" diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster-resource-set.yaml b/test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster-resource-set.yaml new file mode 100644 index 0000000000..6507eed65e --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster-resource-set.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: "cni-${CLUSTER_NAME}-crs-cni" +data: ${CNI_RESOURCES} +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: "${CLUSTER_NAME}-crs-cni" +spec: + strategy: ApplyOnce + clusterSelector: + matchLabels: + cni: "${CLUSTER_NAME}-crs-cni" + resources: + - name: "cni-${CLUSTER_NAME}-crs-cni" + kind: ConfigMap diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster-template-topology.yaml b/test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster-template-topology.yaml new file mode 100644 index 0000000000..078653a4be --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster-template-topology.yaml @@ -0,0 +1,835 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: 
'${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + topology: + class: '${CLUSTER_CLASS_NAME}' + controlPlane: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + variables: + - name: sshKey + value: '${VSPHERE_SSH_AUTHORIZED_KEY}' + - name: infraServer + value: + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' + url: '${VSPHERE_SERVER}' + - name: kubeVipPodManifest + value: | + apiVersion: v1 + kind: Pod + metadata: + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: cp_enable + value: "true" + - name: vip_interface + value: ${VIP_NETWORK_INTERFACE=""} + - name: address + value: ${CONTROL_PLANE_ENDPOINT_IP} + - name: port + value: "6443" + - name: vip_arp + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + image: ghcr.io/kube-vip/kube-vip:v0.5.11 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + type: FileOrCreate + name: kubeconfig + - name: controlPlaneIpAddr + value: ${CONTROL_PLANE_ENDPOINT_IP} + - name: credsSecretName + value: '${CLUSTER_NAME}' + version: '${KUBERNETES_VERSION}' + workers: + machineDeployments: + - class: ${CLUSTER_CLASS_NAME}-worker + metadata: {} + name: md-0 + replicas: ${WORKER_MACHINE_COUNT} +--- +apiVersion: v1 +kind: Secret +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +stringData: + password: ${VSPHERE_PASSWORD} + username: ${VSPHERE_USERNAME} +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: ${CLUSTER_NAME}-crs-0 + namespace: '${NAMESPACE}' 
+spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + resources: + - kind: Secret + name: vsphere-csi-controller + - kind: ConfigMap + name: vsphere-csi-controller-role + - kind: ConfigMap + name: vsphere-csi-controller-binding + - kind: Secret + name: csi-vsphere-config + - kind: ConfigMap + name: csi.vsphere.vmware.com + - kind: ConfigMap + name: vsphere-csi-node + - kind: ConfigMap + name: vsphere-csi-controller + - kind: Secret + name: cloud-controller-manager + - kind: Secret + name: cloud-provider-vsphere-credentials + - kind: ConfigMap + name: cpi-manifests +--- +apiVersion: v1 +kind: Secret +metadata: + name: vsphere-csi-controller + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: vsphere-csi-controller + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: vsphere-csi-controller-role + rules: + - apiGroups: + - storage.k8s.io + resources: + - csidrivers + verbs: + - create + - delete + - apiGroups: + - "" + resources: + - nodes + - pods + - secrets + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - create + - delete + - patch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments/status + verbs: + - patch + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + - csinodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch + - apiGroups: + - coordination.k8s.io + resources: + 
- leases + verbs: + - get + - watch + - list + - delete + - update + - create + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - get + - list + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - get + - list +kind: ConfigMap +metadata: + name: vsphere-csi-controller-role + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +data: + data: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: vsphere-csi-controller-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vsphere-csi-controller-role + subjects: + - kind: ServiceAccount + name: vsphere-csi-controller + namespace: kube-system +kind: ConfigMap +metadata: + name: vsphere-csi-controller-binding + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +kind: Secret +metadata: + name: csi-vsphere-config + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: csi-vsphere-config + namespace: kube-system + stringData: + csi-vsphere.conf: |+ + [Global] + thumbprint = "${VSPHERE_TLS_THUMBPRINT}" + cluster-id = "${NAMESPACE}/${CLUSTER_NAME}" + + [VirtualCenter "${VSPHERE_SERVER}"] + user = "${VSPHERE_USERNAME}" + password = "${VSPHERE_PASSWORD}" + datacenters = "${VSPHERE_DATACENTER}" + + [Network] + public-network = "${VSPHERE_NETWORK}" + + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + apiVersion: storage.k8s.io/v1 + kind: CSIDriver + metadata: + name: csi.vsphere.vmware.com + spec: + attachRequired: true +kind: ConfigMap +metadata: + name: csi.vsphere.vmware.com + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +data: + data: | + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: vsphere-csi-node + namespace: kube-system + spec: + selector: + matchLabels: + app: vsphere-csi-node + template: + metadata: + labels: + app: vsphere-csi-node + role: vsphere-csi + spec: + containers: 
+ - args: + - --v=5 + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + image: quay.io/k8scsi/csi-node-driver-registrar:v2.0.1 + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock + name: node-driver-registrar + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: X_CSI_MODE + value: node + - name: X_CSI_SPEC_REQ_VALIDATION + value: "false" + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: X_CSI_LOG_LEVEL + value: INFO + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.0 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + name: vsphere-csi-node + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + resources: {} + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - SYS_ADMIN + privileged: true + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + - mountPath: /csi + name: plugin-dir + - mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + name: pods-mount-dir + - mountPath: /dev + name: device-dir + - args: + - --csi-address=/csi/csi.sock + image: quay.io/k8scsi/livenessprobe:v2.1.0 + name: liveness-probe + resources: {} + volumeMounts: + - mountPath: /csi + name: plugin-dir + dnsPolicy: Default + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: 
vsphere-config-volume + secret: + secretName: csi-vsphere-config + - hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + name: registration-dir + - hostPath: + path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/ + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /var/lib/kubelet + type: Directory + name: pods-mount-dir + - hostPath: + path: /dev + name: device-dir + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: vsphere-csi-node + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +data: + data: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: vsphere-csi-controller + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + app: vsphere-csi-controller + template: + metadata: + labels: + app: vsphere-csi-controller + role: vsphere-csi + spec: + containers: + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + env: + - name: ADDRESS + value: /csi/csi.sock + image: quay.io/k8scsi/csi-attacher:v3.0.0 + name: csi-attacher + resources: {} + volumeMounts: + - mountPath: /csi + name: socket-dir + - env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: X_CSI_MODE + value: controller + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: X_CSI_LOG_LEVEL + value: INFO + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.0 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + name: vsphere-csi-controller + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + resources: {} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + env: + - name: ADDRESS + value: 
/var/lib/csi/sockets/pluginproxy/csi.sock + image: quay.io/k8scsi/livenessprobe:v2.1.0 + name: liveness-probe + resources: {} + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --leader-election + env: + - name: X_CSI_FULL_SYNC_INTERVAL_MINUTES + value: "30" + - name: LOGGER_LEVEL + value: PRODUCTION + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.1.0 + name: vsphere-syncer + resources: {} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + - --default-fstype=ext4 + env: + - name: ADDRESS + value: /csi/csi.sock + image: quay.io/k8scsi/csi-provisioner:v2.0.0 + name: csi-provisioner + resources: {} + volumeMounts: + - mountPath: /csi + name: socket-dir + dnsPolicy: Default + serviceAccountName: vsphere-csi-controller + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - name: vsphere-config-volume + secret: + secretName: csi-vsphere-config + - emptyDir: {} + name: socket-dir +kind: ConfigMap +metadata: + name: vsphere-csi-controller + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-controller-manager + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: service-account + name: cloud-controller-manager + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-provider-vsphere-credentials + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: secret 
+ name: cloud-provider-vsphere-credentials + namespace: kube-system + stringData: + ${VSPHERE_SERVER}.password: ${VSPHERE_PASSWORD} + ${VSPHERE_SERVER}.username: ${VSPHERE_USERNAME} + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: role + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - services/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - update + - create + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: cluster-role-binding + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + data: + vsphere.conf: | + global: + port: 443 + secretName: cloud-provider-vsphere-credentials + 
secretNamespace: kube-system + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' + vcenter: + ${VSPHERE_SERVER}: + datacenters: + - '${VSPHERE_DATACENTER}' + server: '${VSPHERE_SERVER}' + kind: ConfigMap + metadata: + name: vsphere-cloud-config + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: role-binding + name: servicecatalog.k8s.io:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + component: cloud-controller-manager + tier: control-plane + name: vsphere-cloud-controller-manager + namespace: kube-system + spec: + selector: + matchLabels: + name: vsphere-cloud-controller-manager + template: + metadata: + labels: + component: cloud-controller-manager + name: vsphere-cloud-controller-manager + tier: control-plane + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - --v=2 + - --cloud-provider=vsphere + - --cloud-config=/etc/cloud/vsphere.conf + image: gcr.io/cloud-provider-vsphere/cpi/release/manager:${CPI_IMAGE_K8S_VERSION} + name: vsphere-cloud-controller-manager + resources: + requests: + cpu: 200m + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + hostNetwork: true + priorityClassName: system-node-critical + securityContext: + runAsUser: 1001 + serviceAccountName: cloud-controller-manager + tolerations: + - effect: NoSchedule + key: 
node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + volumes: + - configMap: + name: vsphere-cloud-config + name: vsphere-config-volume + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: cpi-manifests + namespace: '${NAMESPACE}' diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster.yaml b/test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster.yaml new file mode 100644 index 0000000000..0048c12797 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.7/bases/cluster.yaml @@ -0,0 +1,996 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: '${CLUSTER_NAME}' + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereCluster + name: '${CLUSTER_NAME}' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereCluster +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + controlPlaneEndpoint: + host: ${CONTROL_PLANE_ENDPOINT_IP} + port: 6443 + identityRef: + kind: Secret + name: '${CLUSTER_NAME}' + server: '${VSPHERE_SERVER}' + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + template: + spec: + cloneMode: linkedClone + datacenter: '${VSPHERE_DATACENTER}' + datastore: '${VSPHERE_DATASTORE}' + diskGiB: 25 + folder: '${VSPHERE_FOLDER}' + memoryMiB: 8192 + 
network: + devices: + - dhcp4: true + networkName: '${VSPHERE_NETWORK}' + numCPUs: 2 + os: Linux + resourcePool: '${VSPHERE_RESOURCE_POOL}' + server: '${VSPHERE_SERVER}' + storagePolicyName: '${VSPHERE_STORAGE_POLICY}' + template: '${VSPHERE_TEMPLATE}' + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: ${CLUSTER_NAME}-worker + namespace: '${NAMESPACE}' +spec: + template: + spec: + cloneMode: linkedClone + datacenter: '${VSPHERE_DATACENTER}' + datastore: '${VSPHERE_DATASTORE}' + diskGiB: 25 + folder: '${VSPHERE_FOLDER}' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: '${VSPHERE_NETWORK}' + numCPUs: 2 + os: Linux + resourcePool: '${VSPHERE_RESOURCE_POOL}' + server: '${VSPHERE_SERVER}' + storagePolicyName: '${VSPHERE_STORAGE_POLICY}' + template: '${VSPHERE_TEMPLATE}' + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: + cloud-provider: external + controllerManager: + extraArgs: + cloud-provider: external + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: cp_enable + value: "true" + - name: vip_interface + value: ${VIP_NETWORK_INTERFACE=""} + - name: address + value: ${CONTROL_PLANE_ENDPOINT_IP} + - name: port + value: "6443" + - name: vip_arp + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + image: ghcr.io/kube-vip/kube-vip:v0.5.11 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + 
volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + type: FileOrCreate + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback localhost6 localhost6.localdomain6" + >/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }} {{ local_hostname }} localhost + localhost.localdomain localhost4 localhost4.localdomain4" >>/etc/hosts + users: + - name: capv + sshAuthorizedKeys: + - '${VSPHERE_SSH_AUTHORIZED_KEY}' + sudo: ALL=(ALL) NOPASSWD:ALL + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: '${CLUSTER_NAME}' + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + version: '${KUBERNETES_VERSION}' +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: '${CLUSTER_NAME}-md-0' + namespace: '${NAMESPACE}' +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback localhost6 localhost6.localdomain6" + >/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }} {{ local_hostname }} localhost + localhost.localdomain localhost4 localhost4.localdomain4" 
>>/etc/hosts + users: + - name: capv + sshAuthorizedKeys: + - '${VSPHERE_SSH_AUTHORIZED_KEY}' + sudo: ALL=(ALL) NOPASSWD:ALL +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: '${CLUSTER_NAME}-md-0' + namespace: '${NAMESPACE}' +spec: + clusterName: '${CLUSTER_NAME}' + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: {} + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: '${CLUSTER_NAME}-md-0' + clusterName: '${CLUSTER_NAME}' + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: ${CLUSTER_NAME}-worker + version: '${KUBERNETES_VERSION}' +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: ${CLUSTER_NAME}-crs-0 + namespace: '${NAMESPACE}' +spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + resources: + - kind: Secret + name: vsphere-csi-controller + - kind: ConfigMap + name: vsphere-csi-controller-role + - kind: ConfigMap + name: vsphere-csi-controller-binding + - kind: Secret + name: csi-vsphere-config + - kind: ConfigMap + name: csi.vsphere.vmware.com + - kind: ConfigMap + name: vsphere-csi-node + - kind: ConfigMap + name: vsphere-csi-controller + - kind: Secret + name: cloud-controller-manager + - kind: Secret + name: cloud-provider-vsphere-credentials + - kind: ConfigMap + name: cpi-manifests +--- +apiVersion: v1 +kind: Secret +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +stringData: + password: ${VSPHERE_PASSWORD} + username: ${VSPHERE_USERNAME} +--- +apiVersion: v1 +kind: Secret +metadata: + name: vsphere-csi-controller + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: 
v1 + kind: ServiceAccount + metadata: + name: vsphere-csi-controller + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: vsphere-csi-controller-role + rules: + - apiGroups: + - storage.k8s.io + resources: + - csidrivers + verbs: + - create + - delete + - apiGroups: + - "" + resources: + - nodes + - pods + - secrets + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - create + - delete + - patch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments/status + verbs: + - patch + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + - csinodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - get + - list + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - get + - list +kind: ConfigMap +metadata: + name: vsphere-csi-controller-role + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +data: + data: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: vsphere-csi-controller-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vsphere-csi-controller-role + subjects: + - kind: ServiceAccount + name: vsphere-csi-controller + namespace: kube-system +kind: 
ConfigMap +metadata: + name: vsphere-csi-controller-binding + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +kind: Secret +metadata: + name: csi-vsphere-config + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: csi-vsphere-config + namespace: kube-system + stringData: + csi-vsphere.conf: |+ + [Global] + thumbprint = "${VSPHERE_TLS_THUMBPRINT}" + cluster-id = "${NAMESPACE}/${CLUSTER_NAME}" + + [VirtualCenter "${VSPHERE_SERVER}"] + user = "${VSPHERE_USERNAME}" + password = "${VSPHERE_PASSWORD}" + datacenters = "${VSPHERE_DATACENTER}" + + [Network] + public-network = "${VSPHERE_NETWORK}" + + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + apiVersion: storage.k8s.io/v1 + kind: CSIDriver + metadata: + name: csi.vsphere.vmware.com + spec: + attachRequired: true +kind: ConfigMap +metadata: + name: csi.vsphere.vmware.com + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +data: + data: | + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: vsphere-csi-node + namespace: kube-system + spec: + selector: + matchLabels: + app: vsphere-csi-node + template: + metadata: + labels: + app: vsphere-csi-node + role: vsphere-csi + spec: + containers: + - args: + - --v=5 + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + image: quay.io/k8scsi/csi-node-driver-registrar:v2.0.1 + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock + name: node-driver-registrar + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: X_CSI_MODE + value: node + - name: 
X_CSI_SPEC_REQ_VALIDATION + value: "false" + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: X_CSI_LOG_LEVEL + value: INFO + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.0 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + name: vsphere-csi-node + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + resources: {} + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - SYS_ADMIN + privileged: true + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + - mountPath: /csi + name: plugin-dir + - mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + name: pods-mount-dir + - mountPath: /dev + name: device-dir + - args: + - --csi-address=/csi/csi.sock + image: quay.io/k8scsi/livenessprobe:v2.1.0 + name: liveness-probe + resources: {} + volumeMounts: + - mountPath: /csi + name: plugin-dir + dnsPolicy: Default + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: vsphere-config-volume + secret: + secretName: csi-vsphere-config + - hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + name: registration-dir + - hostPath: + path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/ + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /var/lib/kubelet + type: Directory + name: pods-mount-dir + - hostPath: + path: /dev + name: device-dir + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: vsphere-csi-node + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +data: + data: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: vsphere-csi-controller + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + app: 
vsphere-csi-controller + template: + metadata: + labels: + app: vsphere-csi-controller + role: vsphere-csi + spec: + containers: + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + env: + - name: ADDRESS + value: /csi/csi.sock + image: quay.io/k8scsi/csi-attacher:v3.0.0 + name: csi-attacher + resources: {} + volumeMounts: + - mountPath: /csi + name: socket-dir + - env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: X_CSI_MODE + value: controller + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: X_CSI_LOG_LEVEL + value: INFO + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.0 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + name: vsphere-csi-controller + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + resources: {} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: quay.io/k8scsi/livenessprobe:v2.1.0 + name: liveness-probe + resources: {} + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --leader-election + env: + - name: X_CSI_FULL_SYNC_INTERVAL_MINUTES + value: "30" + - name: LOGGER_LEVEL + value: PRODUCTION + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.1.0 + name: vsphere-syncer + resources: {} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + - --default-fstype=ext4 + env: + - name: ADDRESS + value: 
/csi/csi.sock + image: quay.io/k8scsi/csi-provisioner:v2.0.0 + name: csi-provisioner + resources: {} + volumeMounts: + - mountPath: /csi + name: socket-dir + dnsPolicy: Default + serviceAccountName: vsphere-csi-controller + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - name: vsphere-config-volume + secret: + secretName: csi-vsphere-config + - emptyDir: {} + name: socket-dir +kind: ConfigMap +metadata: + name: vsphere-csi-controller + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-controller-manager + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: service-account + name: cloud-controller-manager + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-provider-vsphere-credentials + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: secret + name: cloud-provider-vsphere-credentials + namespace: kube-system + stringData: + ${VSPHERE_SERVER}.password: ${VSPHERE_PASSWORD} + ${VSPHERE_SERVER}.username: ${VSPHERE_USERNAME} + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: role + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + 
verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - services/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - update + - create + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: cluster-role-binding + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + data: + vsphere.conf: | + global: + port: 443 + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' + vcenter: + ${VSPHERE_SERVER}: + datacenters: + - '${VSPHERE_DATACENTER}' + server: '${VSPHERE_SERVER}' + kind: ConfigMap + metadata: + name: vsphere-cloud-config + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: role-binding + name: servicecatalog.k8s.io:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: 
cloud-controller-manager + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + component: cloud-controller-manager + tier: control-plane + name: vsphere-cloud-controller-manager + namespace: kube-system + spec: + selector: + matchLabels: + name: vsphere-cloud-controller-manager + template: + metadata: + labels: + component: cloud-controller-manager + name: vsphere-cloud-controller-manager + tier: control-plane + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - --v=2 + - --cloud-provider=vsphere + - --cloud-config=/etc/cloud/vsphere.conf + image: gcr.io/cloud-provider-vsphere/cpi/release/manager:${CPI_IMAGE_K8S_VERSION} + name: vsphere-cloud-controller-manager + resources: + requests: + cpu: 200m + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + hostNetwork: true + priorityClassName: system-node-critical + securityContext: + runAsUser: 1001 + serviceAccountName: cloud-controller-manager + tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + volumes: + - configMap: + name: vsphere-cloud-config + name: vsphere-config-volume + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: cpi-manifests + namespace: '${NAMESPACE}' diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/bases/remove-storage-policy.yaml b/test/e2e/data/infrastructure-vsphere/v1.7/bases/remove-storage-policy.yaml new file mode 100644 index 0000000000..9e0cac085c --- /dev/null +++ 
b/test/e2e/data/infrastructure-vsphere/v1.7/bases/remove-storage-policy.yaml @@ -0,0 +1,2 @@ +- op: remove + path: /spec/template/spec/storagePolicyName diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/clusterclass-quick-start.yaml new file mode 100644 index 0000000000..4ba06d45f5 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/clusterclass-quick-start.yaml @@ -0,0 +1,297 @@ +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereClusterTemplate +metadata: + name: '${CLUSTER_CLASS_NAME}' + namespace: '${NAMESPACE}' +spec: + template: + spec: {} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: '${CLUSTER_CLASS_NAME}' +spec: + controlPlane: + machineInfrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: ${CLUSTER_CLASS_NAME}-template + namespace: '${NAMESPACE}' + ref: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + name: ${CLUSTER_CLASS_NAME}-controlplane + namespace: '${NAMESPACE}' + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereClusterTemplate + name: '${CLUSTER_CLASS_NAME}' + namespace: '${NAMESPACE}' + patches: + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files + value: [] + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: add + path: /spec/template/spec/files + value: [] + selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + name: createFilesArray + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/users + valueFrom: + 
template: | + - name: capv + sshAuthorizedKeys: + - '{{ .sshKey }}' + sudo: ALL=(ALL) NOPASSWD:ALL + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: add + path: /spec/template/spec/users + valueFrom: + template: | + - name: capv + sshAuthorizedKeys: + - '{{ .sshKey }}' + sudo: ALL=(ALL) NOPASSWD:ALL + selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + enabledIf: '{{ if .sshKey }}true{{end}}' + name: enableSSHIntoNodes + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/controlPlaneEndpoint + valueFrom: + template: | + host: '{{ .controlPlaneIpAddr }}' + port: 6443 + - op: add + path: /spec/template/spec/identityRef + valueFrom: + template: | + kind: Secret + name: '{{ .credsSecretName }}' + - op: add + path: /spec/template/spec/server + valueFrom: + variable: infraServer.url + - op: add + path: /spec/template/spec/thumbprint + valueFrom: + variable: infraServer.thumbprint + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereClusterTemplate + matchResources: + infrastructureCluster: true + name: infraClusterSubstitutions + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files/- + valueFrom: + template: |- + owner: root:root + path: "/etc/kubernetes/manifests/kube-vip.yaml" + content: {{ printf "%q" (regexReplaceAll "(name: address\n +value:).*" .kubeVipPodManifest (printf "$1 %s" .controlPlaneIpAddr)) }} + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + name: kubeVipPodManifest + variables: + - name: sshKey + required: false + schema: + openAPIV3Schema: + description: Public key to SSH onto the cluster nodes. 
+ type: string + - name: infraServer + required: true + schema: + openAPIV3Schema: + properties: + thumbprint: + type: string + url: + type: string + type: object + - name: controlPlaneIpAddr + required: true + schema: + openAPIV3Schema: + description: Floating VIP for the control plane. + type: string + - name: credsSecretName + required: true + schema: + openAPIV3Schema: + description: Secret containing the credentials for the infra cluster. + type: string + - name: kubeVipPodManifest + required: true + schema: + openAPIV3Schema: + description: kube-vip manifest for the control plane. + type: string + workers: + machineDeployments: + - class: ${CLUSTER_CLASS_NAME}-worker + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_CLASS_NAME}-worker-bootstrap-template + namespace: '${NAMESPACE}' + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: ${CLUSTER_CLASS_NAME}-worker-machinetemplate + namespace: '${NAMESPACE}' + metadata: {} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-template + namespace: '${NAMESPACE}' +spec: + template: + spec: + cloneMode: linkedClone + datacenter: '${VSPHERE_DATACENTER}' + datastore: '${VSPHERE_DATASTORE}' + diskGiB: 25 + folder: '${VSPHERE_FOLDER}' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: '${VSPHERE_NETWORK}' + numCPUs: 2 + os: Linux + resourcePool: '${VSPHERE_RESOURCE_POOL}' + server: '${VSPHERE_SERVER}' + storagePolicyName: '${VSPHERE_STORAGE_POLICY}' + template: '${VSPHERE_TEMPLATE}' + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-worker-machinetemplate + namespace: '${NAMESPACE}' +spec: + template: + spec: + cloneMode: linkedClone + datacenter: 
'${VSPHERE_DATACENTER}' + datastore: '${VSPHERE_DATASTORE}' + diskGiB: 25 + folder: '${VSPHERE_FOLDER}' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: '${VSPHERE_NETWORK}' + numCPUs: 2 + os: Linux + resourcePool: '${VSPHERE_RESOURCE_POOL}' + server: '${VSPHERE_SERVER}' + storagePolicyName: '${VSPHERE_STORAGE_POLICY}' + template: '${VSPHERE_TEMPLATE}' + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlaneTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-controlplane + namespace: '${NAMESPACE}' +spec: + template: + spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: + cloud-provider: external + controllerManager: + extraArgs: + cloud-provider: external + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback localhost6 localhost6.localdomain6" + >/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }} {{ local_hostname }} localhost + localhost.localdomain localhost4 localhost4.localdomain4" >>/etc/hosts + users: + - name: capv + sshAuthorizedKeys: + - '${VSPHERE_SSH_AUTHORIZED_KEY}' + sudo: ALL=(ALL) NOPASSWD:ALL +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-worker-bootstrap-template + namespace: '${NAMESPACE}' +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + preKubeadmCommands: + - hostnamectl set-hostname "{{ 
ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback localhost6 localhost6.localdomain6" + >/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }} {{ local_hostname }} localhost + localhost.localdomain localhost4 localhost4.localdomain4" >>/etc/hosts diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/kustomization.yaml b/test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/kustomization.yaml new file mode 100644 index 0000000000..2f2fd513ce --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/kustomization.yaml @@ -0,0 +1,13 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- ./clusterclass-quick-start.yaml +# patchesStrategicMerge: +# - vsphere-template.yaml +patches: + - target: + kind: ClusterClass + path: ./patch-vsphere-template.yaml + - target: + kind: ClusterClass + path: ./patch-prekubeadmscript.yaml diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/patch-prekubeadmscript.yaml b/test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/patch-prekubeadmscript.yaml new file mode 100644 index 0000000000..3e6e63b28d --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/patch-prekubeadmscript.yaml @@ -0,0 +1,51 @@ +- op: add + path: /spec/patches/- + value: + definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/preKubeadmCommands/- + value: "/opt/prekubeadmscript.sh" + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files/- + valueFrom: + template: | + owner: root:root + path: "/opt/prekubeadmscript.sh" + permissions: "0755" + content: {{ printf "%q" .preKubeadmScript }} + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: add + path: /spec/template/spec/preKubeadmCommands/- + value: "/opt/prekubeadmscript.sh" + - op: add + path: /spec/template/spec/files/- + valueFrom: + template: | 
+ owner: root:root + path: "/opt/prekubeadmscript.sh" + permissions: "0755" + content: {{ printf "%q" .preKubeadmScript }} + selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + enabledIf: '{{ if .preKubeadmScript }}true{{ end }}' + name: preKubeadmScript +- op: add + path: /spec/variables/- + value: + name: preKubeadmScript + required: false + schema: + openAPIV3Schema: + type: string + description: Script to run in preKubeadmCommands. diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/patch-vsphere-template.yaml b/test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/patch-vsphere-template.yaml new file mode 100644 index 0000000000..5f7f38db63 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.7/clusterclass/patch-vsphere-template.yaml @@ -0,0 +1,37 @@ +- op: add + path: /spec/patches/- + value: + definitions: + - jsonPatches: + - op: replace + path: /spec/template/spec/template + valueFrom: + template: |- + {{- if semverCompare ">= v1.28" .builtin.controlPlane.version -}} + ubuntu-2204-kube-{{ .builtin.controlPlane.version }} + {{- else -}} + ubuntu-2004-kube-{{ .builtin.controlPlane.version }} + {{- end -}} + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: replace + path: /spec/template/spec/template + valueFrom: + template: |- + {{- if semverCompare ">= v1.28" .builtin.machineDeployment.version -}} + ubuntu-2204-kube-{{ .builtin.machineDeployment.version }} + {{- else -}} + ubuntu-2004-kube-{{ .builtin.machineDeployment.version }} + {{- end -}} + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + name: vSphereTemplate diff --git 
a/test/e2e/data/infrastructure-vsphere/v1.7/topology/kustomization.yaml b/test/e2e/data/infrastructure-vsphere/v1.7/topology/kustomization.yaml new file mode 100644 index 0000000000..0324634755 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.7/topology/kustomization.yaml @@ -0,0 +1,9 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../bases/cluster-template-topology.yaml + - ../bases/cluster-resource-set.yaml +patchesStrategicMerge: + - ../bases/cluster-resource-set-label.yaml + - ../bases/cluster-network-CIDR.yaml + - ../bases/cluster-resource-set-csi-insecure.yaml diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/workload/kustomization.yaml b/test/e2e/data/infrastructure-vsphere/v1.7/workload/kustomization.yaml new file mode 100644 index 0000000000..e191dff830 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.7/workload/kustomization.yaml @@ -0,0 +1,8 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- ../topology +patches: + - target: + kind: Cluster + path: workload-control-plane-endpoint-ip.yaml diff --git a/test/e2e/data/infrastructure-vsphere/v1.7/workload/workload-control-plane-endpoint-ip.yaml b/test/e2e/data/infrastructure-vsphere/v1.7/workload/workload-control-plane-endpoint-ip.yaml new file mode 100644 index 0000000000..f3cfc35a4e --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.7/workload/workload-control-plane-endpoint-ip.yaml @@ -0,0 +1,5 @@ +- op: replace + path: /spec/topology/variables/3 + value: + name: controlPlaneIpAddr + value: "${WORKLOAD_CONTROL_PLANE_ENDPOINT_IP}" diff --git a/test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster-network-CIDR.yaml b/test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster-network-CIDR.yaml new file mode 100644 index 0000000000..24d0253cef --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster-network-CIDR.yaml @@ -0,0 +1,10 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster 
+metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.30.0/24 diff --git a/test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster-resource-set-csi-insecure.yaml b/test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster-resource-set-csi-insecure.yaml new file mode 100644 index 0000000000..86c659694a --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster-resource-set-csi-insecure.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Secret +metadata: + name: csi-vsphere-config + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: csi-vsphere-config + namespace: kube-system + stringData: + csi-vsphere.conf: |+ + [Global] + cluster-id = "${NAMESPACE}/${CLUSTER_NAME}" + + [VirtualCenter "${VSPHERE_SERVER}"] + insecure-flag = "${VSPHERE_INSECURE_CSI}" + user = "${VSPHERE_USERNAME}" + password = "${VSPHERE_PASSWORD}" + datacenters = "${VSPHERE_DATACENTER}" + + [Network] + public-network = "${VSPHERE_NETWORK}" + + type: Opaque +type: addons.cluster.x-k8s.io/resource-set diff --git a/test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster-resource-set-label.yaml b/test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster-resource-set-label.yaml new file mode 100644 index 0000000000..1447050b04 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster-resource-set-label.yaml @@ -0,0 +1,7 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' + labels: + cni: "${CLUSTER_NAME}-crs-cni" diff --git a/test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster-resource-set.yaml b/test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster-resource-set.yaml new file mode 100644 index 0000000000..6507eed65e --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster-resource-set.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: 
"cni-${CLUSTER_NAME}-crs-cni" +data: ${CNI_RESOURCES} +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: "${CLUSTER_NAME}-crs-cni" +spec: + strategy: ApplyOnce + clusterSelector: + matchLabels: + cni: "${CLUSTER_NAME}-crs-cni" + resources: + - name: "cni-${CLUSTER_NAME}-crs-cni" + kind: ConfigMap diff --git a/test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster-template-topology.yaml b/test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster-template-topology.yaml new file mode 100644 index 0000000000..078653a4be --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster-template-topology.yaml @@ -0,0 +1,835 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + topology: + class: '${CLUSTER_CLASS_NAME}' + controlPlane: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + variables: + - name: sshKey + value: '${VSPHERE_SSH_AUTHORIZED_KEY}' + - name: infraServer + value: + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' + url: '${VSPHERE_SERVER}' + - name: kubeVipPodManifest + value: | + apiVersion: v1 + kind: Pod + metadata: + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: cp_enable + value: "true" + - name: vip_interface + value: ${VIP_NETWORK_INTERFACE=""} + - name: address + value: ${CONTROL_PLANE_ENDPOINT_IP} + - name: port + value: "6443" + - name: vip_arp + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + image: ghcr.io/kube-vip/kube-vip:v0.5.11 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostAliases: + - hostnames: + - 
kubernetes + ip: 127.0.0.1 + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + type: FileOrCreate + name: kubeconfig + - name: controlPlaneIpAddr + value: ${CONTROL_PLANE_ENDPOINT_IP} + - name: credsSecretName + value: '${CLUSTER_NAME}' + version: '${KUBERNETES_VERSION}' + workers: + machineDeployments: + - class: ${CLUSTER_CLASS_NAME}-worker + metadata: {} + name: md-0 + replicas: ${WORKER_MACHINE_COUNT} +--- +apiVersion: v1 +kind: Secret +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +stringData: + password: ${VSPHERE_PASSWORD} + username: ${VSPHERE_USERNAME} +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: ${CLUSTER_NAME}-crs-0 + namespace: '${NAMESPACE}' +spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + resources: + - kind: Secret + name: vsphere-csi-controller + - kind: ConfigMap + name: vsphere-csi-controller-role + - kind: ConfigMap + name: vsphere-csi-controller-binding + - kind: Secret + name: csi-vsphere-config + - kind: ConfigMap + name: csi.vsphere.vmware.com + - kind: ConfigMap + name: vsphere-csi-node + - kind: ConfigMap + name: vsphere-csi-controller + - kind: Secret + name: cloud-controller-manager + - kind: Secret + name: cloud-provider-vsphere-credentials + - kind: ConfigMap + name: cpi-manifests +--- +apiVersion: v1 +kind: Secret +metadata: + name: vsphere-csi-controller + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: vsphere-csi-controller + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: vsphere-csi-controller-role + rules: + - apiGroups: + - storage.k8s.io + resources: + - csidrivers + verbs: + - create + - delete + - apiGroups: + - "" + resources: 
+ - nodes + - pods + - secrets + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - create + - delete + - patch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments/status + verbs: + - patch + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + - csinodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - get + - list + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - get + - list +kind: ConfigMap +metadata: + name: vsphere-csi-controller-role + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +data: + data: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: vsphere-csi-controller-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vsphere-csi-controller-role + subjects: + - kind: ServiceAccount + name: vsphere-csi-controller + namespace: kube-system +kind: ConfigMap +metadata: + name: vsphere-csi-controller-binding + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +kind: Secret +metadata: + name: csi-vsphere-config + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: csi-vsphere-config + namespace: kube-system + stringData: + csi-vsphere.conf: |+ + [Global] + thumbprint = "${VSPHERE_TLS_THUMBPRINT}" + cluster-id = 
"${NAMESPACE}/${CLUSTER_NAME}" + + [VirtualCenter "${VSPHERE_SERVER}"] + user = "${VSPHERE_USERNAME}" + password = "${VSPHERE_PASSWORD}" + datacenters = "${VSPHERE_DATACENTER}" + + [Network] + public-network = "${VSPHERE_NETWORK}" + + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + apiVersion: storage.k8s.io/v1 + kind: CSIDriver + metadata: + name: csi.vsphere.vmware.com + spec: + attachRequired: true +kind: ConfigMap +metadata: + name: csi.vsphere.vmware.com + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +data: + data: | + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: vsphere-csi-node + namespace: kube-system + spec: + selector: + matchLabels: + app: vsphere-csi-node + template: + metadata: + labels: + app: vsphere-csi-node + role: vsphere-csi + spec: + containers: + - args: + - --v=5 + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + image: quay.io/k8scsi/csi-node-driver-registrar:v2.0.1 + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock + name: node-driver-registrar + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: X_CSI_MODE + value: node + - name: X_CSI_SPEC_REQ_VALIDATION + value: "false" + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: X_CSI_LOG_LEVEL + value: INFO + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.0 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + 
initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + name: vsphere-csi-node + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + resources: {} + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - SYS_ADMIN + privileged: true + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + - mountPath: /csi + name: plugin-dir + - mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + name: pods-mount-dir + - mountPath: /dev + name: device-dir + - args: + - --csi-address=/csi/csi.sock + image: quay.io/k8scsi/livenessprobe:v2.1.0 + name: liveness-probe + resources: {} + volumeMounts: + - mountPath: /csi + name: plugin-dir + dnsPolicy: Default + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: vsphere-config-volume + secret: + secretName: csi-vsphere-config + - hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + name: registration-dir + - hostPath: + path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/ + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /var/lib/kubelet + type: Directory + name: pods-mount-dir + - hostPath: + path: /dev + name: device-dir + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: vsphere-csi-node + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +data: + data: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: vsphere-csi-controller + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + app: vsphere-csi-controller + template: + metadata: + labels: + app: vsphere-csi-controller + role: vsphere-csi + spec: + containers: + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + env: + - name: ADDRESS + value: /csi/csi.sock + image: quay.io/k8scsi/csi-attacher:v3.0.0 + name: csi-attacher + resources: {} + volumeMounts: + - mountPath: /csi + name: socket-dir + - env: + - name: 
CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: X_CSI_MODE + value: controller + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: X_CSI_LOG_LEVEL + value: INFO + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.0 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + name: vsphere-csi-controller + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + resources: {} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: quay.io/k8scsi/livenessprobe:v2.1.0 + name: liveness-probe + resources: {} + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --leader-election + env: + - name: X_CSI_FULL_SYNC_INTERVAL_MINUTES + value: "30" + - name: LOGGER_LEVEL + value: PRODUCTION + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.1.0 + name: vsphere-syncer + resources: {} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + - --default-fstype=ext4 + env: + - name: ADDRESS + value: /csi/csi.sock + image: quay.io/k8scsi/csi-provisioner:v2.0.0 + name: csi-provisioner + resources: {} + volumeMounts: + - mountPath: /csi + name: socket-dir + dnsPolicy: Default + serviceAccountName: vsphere-csi-controller + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - name: 
vsphere-config-volume + secret: + secretName: csi-vsphere-config + - emptyDir: {} + name: socket-dir +kind: ConfigMap +metadata: + name: vsphere-csi-controller + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-controller-manager + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: service-account + name: cloud-controller-manager + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-provider-vsphere-credentials + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: secret + name: cloud-provider-vsphere-credentials + namespace: kube-system + stringData: + ${VSPHERE_SERVER}.password: ${VSPHERE_PASSWORD} + ${VSPHERE_SERVER}.username: ${VSPHERE_USERNAME} + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: role + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - services/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - 
watch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - update + - create + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: cluster-role-binding + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + data: + vsphere.conf: | + global: + port: 443 + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' + vcenter: + ${VSPHERE_SERVER}: + datacenters: + - '${VSPHERE_DATACENTER}' + server: '${VSPHERE_SERVER}' + kind: ConfigMap + metadata: + name: vsphere-cloud-config + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: role-binding + name: servicecatalog.k8s.io:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + component: cloud-controller-manager + tier: control-plane + name: vsphere-cloud-controller-manager + namespace: kube-system + spec: + selector: + matchLabels: + name: vsphere-cloud-controller-manager + template: + metadata: + labels: + component: cloud-controller-manager + name: vsphere-cloud-controller-manager + tier: control-plane + spec: + 
affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - --v=2 + - --cloud-provider=vsphere + - --cloud-config=/etc/cloud/vsphere.conf + image: gcr.io/cloud-provider-vsphere/cpi/release/manager:${CPI_IMAGE_K8S_VERSION} + name: vsphere-cloud-controller-manager + resources: + requests: + cpu: 200m + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + hostNetwork: true + priorityClassName: system-node-critical + securityContext: + runAsUser: 1001 + serviceAccountName: cloud-controller-manager + tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + volumes: + - configMap: + name: vsphere-cloud-config + name: vsphere-config-volume + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: cpi-manifests + namespace: '${NAMESPACE}' diff --git a/test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster.yaml b/test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster.yaml new file mode 100644 index 0000000000..bf66968818 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.8/bases/cluster.yaml @@ -0,0 +1,998 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: '${CLUSTER_NAME}' + 
infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereCluster + name: '${CLUSTER_NAME}' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereCluster +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + controlPlaneEndpoint: + host: ${CONTROL_PLANE_ENDPOINT_IP} + port: 6443 + identityRef: + kind: Secret + name: '${CLUSTER_NAME}' + server: '${VSPHERE_SERVER}' + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + template: + spec: + cloneMode: linkedClone + datacenter: '${VSPHERE_DATACENTER}' + datastore: '${VSPHERE_DATASTORE}' + diskGiB: 25 + folder: '${VSPHERE_FOLDER}' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: '${VSPHERE_NETWORK}' + numCPUs: 2 + os: Linux + powerOffMode: trySoft + resourcePool: '${VSPHERE_RESOURCE_POOL}' + server: '${VSPHERE_SERVER}' + storagePolicyName: '${VSPHERE_STORAGE_POLICY}' + template: '${VSPHERE_TEMPLATE}' + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: ${CLUSTER_NAME}-worker + namespace: '${NAMESPACE}' +spec: + template: + spec: + cloneMode: linkedClone + datacenter: '${VSPHERE_DATACENTER}' + datastore: '${VSPHERE_DATASTORE}' + diskGiB: 25 + folder: '${VSPHERE_FOLDER}' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: '${VSPHERE_NETWORK}' + numCPUs: 2 + os: Linux + powerOffMode: trySoft + resourcePool: '${VSPHERE_RESOURCE_POOL}' + server: '${VSPHERE_SERVER}' + storagePolicyName: '${VSPHERE_STORAGE_POLICY}' + template: '${VSPHERE_TEMPLATE}' + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + kubeadmConfigSpec: + 
clusterConfiguration: + apiServer: + extraArgs: + cloud-provider: external + controllerManager: + extraArgs: + cloud-provider: external + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: cp_enable + value: "true" + - name: vip_interface + value: ${VIP_NETWORK_INTERFACE:=""} + - name: address + value: ${CONTROL_PLANE_ENDPOINT_IP} + - name: port + value: "6443" + - name: vip_arp + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + image: ghcr.io/kube-vip/kube-vip:v0.5.11 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + type: FileOrCreate + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback localhost6 localhost6.localdomain6" + >/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }} {{ local_hostname }} localhost + localhost.localdomain localhost4 localhost4.localdomain4" >>/etc/hosts + users: + - name: capv + sshAuthorizedKeys: + - '${VSPHERE_SSH_AUTHORIZED_KEY}' + sudo: ALL=(ALL) NOPASSWD:ALL + 
machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: '${CLUSTER_NAME}' + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + version: '${KUBERNETES_VERSION}' +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: '${CLUSTER_NAME}-md-0' + namespace: '${NAMESPACE}' +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback localhost6 localhost6.localdomain6" + >/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }} {{ local_hostname }} localhost + localhost.localdomain localhost4 localhost4.localdomain4" >>/etc/hosts + users: + - name: capv + sshAuthorizedKeys: + - '${VSPHERE_SSH_AUTHORIZED_KEY}' + sudo: ALL=(ALL) NOPASSWD:ALL +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: '${CLUSTER_NAME}-md-0' + namespace: '${NAMESPACE}' +spec: + clusterName: '${CLUSTER_NAME}' + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: {} + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: '${CLUSTER_NAME}-md-0' + clusterName: '${CLUSTER_NAME}' + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: ${CLUSTER_NAME}-worker + version: '${KUBERNETES_VERSION}' +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: ${CLUSTER_NAME}-crs-0 + namespace: '${NAMESPACE}' +spec: + clusterSelector: + matchLabels: + 
cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + resources: + - kind: Secret + name: vsphere-csi-controller + - kind: ConfigMap + name: vsphere-csi-controller-role + - kind: ConfigMap + name: vsphere-csi-controller-binding + - kind: Secret + name: csi-vsphere-config + - kind: ConfigMap + name: csi.vsphere.vmware.com + - kind: ConfigMap + name: vsphere-csi-node + - kind: ConfigMap + name: vsphere-csi-controller + - kind: Secret + name: cloud-controller-manager + - kind: Secret + name: cloud-provider-vsphere-credentials + - kind: ConfigMap + name: cpi-manifests +--- +apiVersion: v1 +kind: Secret +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +stringData: + password: ${VSPHERE_PASSWORD} + username: ${VSPHERE_USERNAME} +--- +apiVersion: v1 +kind: Secret +metadata: + name: vsphere-csi-controller + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: vsphere-csi-controller + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: vsphere-csi-controller-role + rules: + - apiGroups: + - storage.k8s.io + resources: + - csidrivers + verbs: + - create + - delete + - apiGroups: + - "" + resources: + - nodes + - pods + - secrets + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - create + - delete + - patch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments/status + verbs: + - patch + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + - csinodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + 
resources: + - events + verbs: + - list + - watch + - create + - update + - patch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - get + - list + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - get + - list +kind: ConfigMap +metadata: + name: vsphere-csi-controller-role + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +data: + data: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: vsphere-csi-controller-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vsphere-csi-controller-role + subjects: + - kind: ServiceAccount + name: vsphere-csi-controller + namespace: kube-system +kind: ConfigMap +metadata: + name: vsphere-csi-controller-binding + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +kind: Secret +metadata: + name: csi-vsphere-config + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: csi-vsphere-config + namespace: kube-system + stringData: + csi-vsphere.conf: |+ + [Global] + thumbprint = "${VSPHERE_TLS_THUMBPRINT}" + cluster-id = "${NAMESPACE}/${CLUSTER_NAME}" + + [VirtualCenter "${VSPHERE_SERVER}"] + user = "${VSPHERE_USERNAME}" + password = "${VSPHERE_PASSWORD}" + datacenters = "${VSPHERE_DATACENTER}" + + [Network] + public-network = "${VSPHERE_NETWORK}" + + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + apiVersion: storage.k8s.io/v1 + kind: CSIDriver + metadata: + name: csi.vsphere.vmware.com + spec: + attachRequired: true +kind: ConfigMap +metadata: + name: csi.vsphere.vmware.com + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +data: + data: | + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: vsphere-csi-node + namespace: kube-system + spec: + selector: + 
matchLabels: + app: vsphere-csi-node + template: + metadata: + labels: + app: vsphere-csi-node + role: vsphere-csi + spec: + containers: + - args: + - --v=5 + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + image: quay.io/k8scsi/csi-node-driver-registrar:v2.0.1 + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock + name: node-driver-registrar + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: X_CSI_MODE + value: node + - name: X_CSI_SPEC_REQ_VALIDATION + value: "false" + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: X_CSI_LOG_LEVEL + value: INFO + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.0 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + name: vsphere-csi-node + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + resources: {} + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - SYS_ADMIN + privileged: true + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + - mountPath: /csi + name: plugin-dir + - mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + name: pods-mount-dir + - mountPath: /dev + name: device-dir + - args: + - --csi-address=/csi/csi.sock + image: quay.io/k8scsi/livenessprobe:v2.1.0 + name: liveness-probe + resources: {} + volumeMounts: + - mountPath: /csi + name: plugin-dir + 
dnsPolicy: Default + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: vsphere-config-volume + secret: + secretName: csi-vsphere-config + - hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + name: registration-dir + - hostPath: + path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/ + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /var/lib/kubelet + type: Directory + name: pods-mount-dir + - hostPath: + path: /dev + name: device-dir + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: vsphere-csi-node + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +data: + data: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: vsphere-csi-controller + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + app: vsphere-csi-controller + template: + metadata: + labels: + app: vsphere-csi-controller + role: vsphere-csi + spec: + containers: + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + env: + - name: ADDRESS + value: /csi/csi.sock + image: quay.io/k8scsi/csi-attacher:v3.0.0 + name: csi-attacher + resources: {} + volumeMounts: + - mountPath: /csi + name: socket-dir + - env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: X_CSI_MODE + value: controller + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: X_CSI_LOG_LEVEL + value: INFO + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.0 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + name: vsphere-csi-controller + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + resources: {} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - mountPath: 
/var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: quay.io/k8scsi/livenessprobe:v2.1.0 + name: liveness-probe + resources: {} + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --leader-election + env: + - name: X_CSI_FULL_SYNC_INTERVAL_MINUTES + value: "30" + - name: LOGGER_LEVEL + value: PRODUCTION + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.1.0 + name: vsphere-syncer + resources: {} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + - --default-fstype=ext4 + env: + - name: ADDRESS + value: /csi/csi.sock + image: quay.io/k8scsi/csi-provisioner:v2.0.0 + name: csi-provisioner + resources: {} + volumeMounts: + - mountPath: /csi + name: socket-dir + dnsPolicy: Default + serviceAccountName: vsphere-csi-controller + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - name: vsphere-config-volume + secret: + secretName: csi-vsphere-config + - emptyDir: {} + name: socket-dir +kind: ConfigMap +metadata: + name: vsphere-csi-controller + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-controller-manager + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: service-account + name: cloud-controller-manager + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-provider-vsphere-credentials + namespace: '${NAMESPACE}' +stringData: + 
data: | + apiVersion: v1 + kind: Secret + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: secret + name: cloud-provider-vsphere-credentials + namespace: kube-system + stringData: + ${VSPHERE_SERVER}.password: ${VSPHERE_PASSWORD} + ${VSPHERE_SERVER}.username: ${VSPHERE_USERNAME} + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: role + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - services/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - update + - create + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: cluster-role-binding + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager 
+ --- + apiVersion: v1 + data: + vsphere.conf: | + global: + port: 443 + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' + vcenter: + ${VSPHERE_SERVER}: + datacenters: + - '${VSPHERE_DATACENTER}' + server: '${VSPHERE_SERVER}' + kind: ConfigMap + metadata: + name: vsphere-cloud-config + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: role-binding + name: servicecatalog.k8s.io:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + component: cloud-controller-manager + tier: control-plane + name: vsphere-cloud-controller-manager + namespace: kube-system + spec: + selector: + matchLabels: + name: vsphere-cloud-controller-manager + template: + metadata: + labels: + component: cloud-controller-manager + name: vsphere-cloud-controller-manager + tier: control-plane + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - --v=2 + - --cloud-provider=vsphere + - --cloud-config=/etc/cloud/vsphere.conf + image: gcr.io/cloud-provider-vsphere/cpi/release/manager:${CPI_IMAGE_K8S_VERSION} + name: vsphere-cloud-controller-manager + resources: + requests: + cpu: 200m + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + hostNetwork: true + priorityClassName: system-node-critical + 
securityContext: + runAsUser: 1001 + serviceAccountName: cloud-controller-manager + tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + volumes: + - configMap: + name: vsphere-cloud-config + name: vsphere-config-volume + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: cpi-manifests + namespace: '${NAMESPACE}' diff --git a/test/e2e/data/infrastructure-vsphere/v1.8/bases/remove-storage-policy.yaml b/test/e2e/data/infrastructure-vsphere/v1.8/bases/remove-storage-policy.yaml new file mode 100644 index 0000000000..9e0cac085c --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.8/bases/remove-storage-policy.yaml @@ -0,0 +1,2 @@ +- op: remove + path: /spec/template/spec/storagePolicyName diff --git a/test/e2e/data/infrastructure-vsphere/v1.8/cluster-template/kustomization.yaml b/test/e2e/data/infrastructure-vsphere/v1.8/cluster-template/kustomization.yaml new file mode 100644 index 0000000000..2d0ccfd062 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.8/cluster-template/kustomization.yaml @@ -0,0 +1,13 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../bases/cluster.yaml + - ../bases/cluster-resource-set.yaml +patchesStrategicMerge: + - ../bases/cluster-resource-set-label.yaml + - ../bases/cluster-network-CIDR.yaml + - ../bases/cluster-resource-set-csi-insecure.yaml +patches: + - target: + kind: VSphereMachineTemplate + path: ../bases/remove-storage-policy.yaml diff --git a/test/e2e/data/infrastructure-vsphere/v1.8/clusterclass/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-vsphere/v1.8/clusterclass/clusterclass-quick-start.yaml new file mode 100644 index 0000000000..a9aeb15574 --- 
/dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.8/clusterclass/clusterclass-quick-start.yaml @@ -0,0 +1,299 @@ +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereClusterTemplate +metadata: + name: '${CLUSTER_CLASS_NAME}' + namespace: '${NAMESPACE}' +spec: + template: + spec: {} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: '${CLUSTER_CLASS_NAME}' +spec: + controlPlane: + machineInfrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: ${CLUSTER_CLASS_NAME}-template + namespace: '${NAMESPACE}' + ref: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + name: ${CLUSTER_CLASS_NAME}-controlplane + namespace: '${NAMESPACE}' + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereClusterTemplate + name: '${CLUSTER_CLASS_NAME}' + namespace: '${NAMESPACE}' + patches: + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files + value: [] + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: add + path: /spec/template/spec/files + value: [] + selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + name: createFilesArray + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/users + valueFrom: + template: | + - name: capv + sshAuthorizedKeys: + - '{{ .sshKey }}' + sudo: ALL=(ALL) NOPASSWD:ALL + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: add + path: /spec/template/spec/users + valueFrom: + template: | + - name: capv + sshAuthorizedKeys: + - '{{ .sshKey }}' + sudo: 
ALL=(ALL) NOPASSWD:ALL + selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + enabledIf: '{{ if .sshKey }}true{{end}}' + name: enableSSHIntoNodes + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/controlPlaneEndpoint + valueFrom: + template: | + host: '{{ .controlPlaneIpAddr }}' + port: 6443 + - op: add + path: /spec/template/spec/identityRef + valueFrom: + template: | + kind: Secret + name: '{{ .credsSecretName }}' + - op: add + path: /spec/template/spec/server + valueFrom: + variable: infraServer.url + - op: add + path: /spec/template/spec/thumbprint + valueFrom: + variable: infraServer.thumbprint + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereClusterTemplate + matchResources: + infrastructureCluster: true + name: infraClusterSubstitutions + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files/- + valueFrom: + template: |- + owner: root:root + path: "/etc/kubernetes/manifests/kube-vip.yaml" + content: {{ printf "%q" (regexReplaceAll "(name: address\n +value:).*" .kubeVipPodManifest (printf "$1 %s" .controlPlaneIpAddr)) }} + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + name: kubeVipPodManifest + variables: + - name: sshKey + required: false + schema: + openAPIV3Schema: + description: Public key to SSH onto the cluster nodes. + type: string + - name: infraServer + required: true + schema: + openAPIV3Schema: + properties: + thumbprint: + type: string + url: + type: string + type: object + - name: controlPlaneIpAddr + required: true + schema: + openAPIV3Schema: + description: Floating VIP for the control plane. 
+ type: string + - name: credsSecretName + required: true + schema: + openAPIV3Schema: + description: Secret containing the credentials for the infra cluster. + type: string + - name: kubeVipPodManifest + required: true + schema: + openAPIV3Schema: + description: kube-vip manifest for the control plane. + type: string + workers: + machineDeployments: + - class: ${CLUSTER_CLASS_NAME}-worker + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_CLASS_NAME}-worker-bootstrap-template + namespace: '${NAMESPACE}' + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: ${CLUSTER_CLASS_NAME}-worker-machinetemplate + namespace: '${NAMESPACE}' + metadata: {} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-template + namespace: '${NAMESPACE}' +spec: + template: + spec: + cloneMode: linkedClone + datacenter: '${VSPHERE_DATACENTER}' + datastore: '${VSPHERE_DATASTORE}' + diskGiB: 25 + folder: '${VSPHERE_FOLDER}' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: '${VSPHERE_NETWORK}' + numCPUs: 2 + os: Linux + powerOffMode: hard + resourcePool: '${VSPHERE_RESOURCE_POOL}' + server: '${VSPHERE_SERVER}' + storagePolicyName: '${VSPHERE_STORAGE_POLICY}' + template: '${VSPHERE_TEMPLATE}' + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-worker-machinetemplate + namespace: '${NAMESPACE}' +spec: + template: + spec: + cloneMode: linkedClone + datacenter: '${VSPHERE_DATACENTER}' + datastore: '${VSPHERE_DATASTORE}' + diskGiB: 25 + folder: '${VSPHERE_FOLDER}' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: '${VSPHERE_NETWORK}' + numCPUs: 2 + os: Linux + powerOffMode: hard + resourcePool: '${VSPHERE_RESOURCE_POOL}' + 
server: '${VSPHERE_SERVER}' + storagePolicyName: '${VSPHERE_STORAGE_POLICY}' + template: '${VSPHERE_TEMPLATE}' + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlaneTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-controlplane + namespace: '${NAMESPACE}' +spec: + template: + spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: + cloud-provider: external + controllerManager: + extraArgs: + cloud-provider: external + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback localhost6 localhost6.localdomain6" + >/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }} {{ local_hostname }} localhost + localhost.localdomain localhost4 localhost4.localdomain4" >>/etc/hosts + users: + - name: capv + sshAuthorizedKeys: + - '${VSPHERE_SSH_AUTHORIZED_KEY}' + sudo: ALL=(ALL) NOPASSWD:ALL +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-worker-bootstrap-template + namespace: '${NAMESPACE}' +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback localhost6 localhost6.localdomain6" + >/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }} {{ local_hostname }} localhost + localhost.localdomain localhost4 localhost4.localdomain4" >>/etc/hosts diff --git 
a/test/e2e/data/infrastructure-vsphere/v1.8/clusterclass/kustomization.yaml b/test/e2e/data/infrastructure-vsphere/v1.8/clusterclass/kustomization.yaml new file mode 100644 index 0000000000..2f2fd513ce --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.8/clusterclass/kustomization.yaml @@ -0,0 +1,13 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- ./clusterclass-quick-start.yaml +# patchesStrategicMerge: +# - vsphere-template.yaml +patches: + - target: + kind: ClusterClass + path: ./patch-vsphere-template.yaml + - target: + kind: ClusterClass + path: ./patch-prekubeadmscript.yaml diff --git a/test/e2e/data/infrastructure-vsphere/v1.8/clusterclass/patch-prekubeadmscript.yaml b/test/e2e/data/infrastructure-vsphere/v1.8/clusterclass/patch-prekubeadmscript.yaml new file mode 100644 index 0000000000..3e6e63b28d --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.8/clusterclass/patch-prekubeadmscript.yaml @@ -0,0 +1,51 @@ +- op: add + path: /spec/patches/- + value: + definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/preKubeadmCommands/- + value: "/opt/prekubeadmscript.sh" + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files/- + valueFrom: + template: | + owner: root:root + path: "/opt/prekubeadmscript.sh" + permissions: "0755" + content: {{ printf "%q" .preKubeadmScript }} + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: add + path: /spec/template/spec/preKubeadmCommands/- + value: "/opt/prekubeadmscript.sh" + - op: add + path: /spec/template/spec/files/- + valueFrom: + template: | + owner: root:root + path: "/opt/prekubeadmscript.sh" + permissions: "0755" + content: {{ printf "%q" .preKubeadmScript }} + selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - 
${CLUSTER_CLASS_NAME}-worker + enabledIf: '{{ if .preKubeadmScript }}true{{ end }}' + name: preKubeadmScript +- op: add + path: /spec/variables/- + value: + name: preKubeadmScript + required: false + schema: + openAPIV3Schema: + type: string + description: Script to run in preKubeadmCommands. diff --git a/test/e2e/data/infrastructure-vsphere/v1.8/clusterclass/patch-vsphere-template.yaml b/test/e2e/data/infrastructure-vsphere/v1.8/clusterclass/patch-vsphere-template.yaml new file mode 100644 index 0000000000..5f7f38db63 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.8/clusterclass/patch-vsphere-template.yaml @@ -0,0 +1,37 @@ +- op: add + path: /spec/patches/- + value: + definitions: + - jsonPatches: + - op: replace + path: /spec/template/spec/template + valueFrom: + template: |- + {{- if semverCompare ">= v1.28" .builtin.controlPlane.version -}} + ubuntu-2204-kube-{{ .builtin.controlPlane.version }} + {{- else -}} + ubuntu-2004-kube-{{ .builtin.controlPlane.version }} + {{- end -}} + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: replace + path: /spec/template/spec/template + valueFrom: + template: |- + {{- if semverCompare ">= v1.28" .builtin.machineDeployment.version -}} + ubuntu-2204-kube-{{ .builtin.machineDeployment.version }} + {{- else -}} + ubuntu-2004-kube-{{ .builtin.machineDeployment.version }} + {{- end -}} + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + name: vSphereTemplate diff --git a/test/e2e/data/infrastructure-vsphere/v1.8/topology/kustomization.yaml b/test/e2e/data/infrastructure-vsphere/v1.8/topology/kustomization.yaml new file mode 100644 index 0000000000..99667d78d5 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.8/topology/kustomization.yaml @@ -0,0 +1,13 @@ +apiVersion: 
kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../bases/cluster-template-topology.yaml + - ../bases/cluster-resource-set.yaml +patchesStrategicMerge: + - ../bases/cluster-resource-set-label.yaml + - ../bases/cluster-network-CIDR.yaml + - ../bases/cluster-resource-set-csi-insecure.yaml +patches: + - target: + kind: VSphereMachineTemplate + path: ../bases/remove-storage-policy.yaml diff --git a/test/e2e/data/infrastructure-vsphere/v1.8/workload/kustomization.yaml b/test/e2e/data/infrastructure-vsphere/v1.8/workload/kustomization.yaml new file mode 100644 index 0000000000..e191dff830 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.8/workload/kustomization.yaml @@ -0,0 +1,8 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- ../topology +patches: + - target: + kind: Cluster + path: workload-control-plane-endpoint-ip.yaml diff --git a/test/e2e/data/infrastructure-vsphere/v1.8/workload/workload-control-plane-endpoint-ip.yaml b/test/e2e/data/infrastructure-vsphere/v1.8/workload/workload-control-plane-endpoint-ip.yaml new file mode 100644 index 0000000000..f3cfc35a4e --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/v1.8/workload/workload-control-plane-endpoint-ip.yaml @@ -0,0 +1,5 @@ +- op: replace + path: /spec/topology/variables/3 + value: + name: controlPlaneIpAddr + value: "${WORKLOAD_CONTROL_PLANE_ENDPOINT_IP}" diff --git a/test/e2e/data/shared/v1.7/v1beta1/metadata.yaml b/test/e2e/data/shared/v1.7/v1beta1/metadata.yaml new file mode 100644 index 0000000000..d8e8461d1e --- /dev/null +++ b/test/e2e/data/shared/v1.7/v1beta1/metadata.yaml @@ -0,0 +1,32 @@ +# maps release series of major.minor to cluster-api contract version +# the contract version may change between minor or major versions, but *not* +# between patch versions. 
+# +# update this file only when a new major or minor version is released +apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 +kind: Metadata +releaseSeries: + - major: 1 + minor: 4 + contract: v1beta1 + - major: 1 + minor: 3 + contract: v1beta1 + - major: 1 + minor: 2 + contract: v1beta1 + - major: 1 + minor: 1 + contract: v1beta1 + - major: 1 + minor: 0 + contract: v1beta1 + - major: 0 + minor: 4 + contract: v1alpha4 + - major: 0 + minor: 3 + contract: v1alpha3 + - major: 0 + minor: 2 + contract: v1alpha2 diff --git a/test/e2e/data/shared/v1.7/v1beta1_provider/metadata.yaml b/test/e2e/data/shared/v1.7/v1beta1_provider/metadata.yaml new file mode 100644 index 0000000000..6edf676aba --- /dev/null +++ b/test/e2e/data/shared/v1.7/v1beta1_provider/metadata.yaml @@ -0,0 +1,20 @@ +# maps release series of major.minor to cluster-api contract version +# the contract version may change between minor or major versions, but *not* +# between patch versions. +# +# update this file only when a new major or minor version is released +apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 +kind: Metadata +releaseSeries: + - major: 0 + minor: 5 + contract: v1alpha2 + - major: 0 + minor: 7 + contract: v1alpha3 + - major: 0 + minor: 8 + contract: v1alpha4 + - major: 1 + minor: 7 + contract: v1beta1 diff --git a/test/e2e/data/shared/v1.8/v1beta1/metadata.yaml b/test/e2e/data/shared/v1.8/v1beta1/metadata.yaml new file mode 100644 index 0000000000..b7332a13a4 --- /dev/null +++ b/test/e2e/data/shared/v1.8/v1beta1/metadata.yaml @@ -0,0 +1,35 @@ +# maps release series of major.minor to cluster-api contract version +# the contract version may change between minor or major versions, but *not* +# between patch versions. 
+# +# update this file only when a new major or minor version is released +apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 +kind: Metadata +releaseSeries: + - major: 1 + minor: 5 + contract: v1beta1 + - major: 1 + minor: 4 + contract: v1beta1 + - major: 1 + minor: 3 + contract: v1beta1 + - major: 1 + minor: 2 + contract: v1beta1 + - major: 1 + minor: 1 + contract: v1beta1 + - major: 1 + minor: 0 + contract: v1beta1 + - major: 0 + minor: 4 + contract: v1alpha4 + - major: 0 + minor: 3 + contract: v1alpha3 + - major: 0 + minor: 2 + contract: v1alpha2 diff --git a/test/e2e/data/shared/v1.8/v1beta1_provider/metadata.yaml b/test/e2e/data/shared/v1.8/v1beta1_provider/metadata.yaml new file mode 100644 index 0000000000..2a1becb6c1 --- /dev/null +++ b/test/e2e/data/shared/v1.8/v1beta1_provider/metadata.yaml @@ -0,0 +1,23 @@ +# maps release series of major.minor to cluster-api contract version +# the contract version may change between minor or major versions, but *not* +# between patch versions. +# +# update this file only when a new major or minor version is released +apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 +kind: Metadata +releaseSeries: + - major: 0 + minor: 5 + contract: v1alpha2 + - major: 0 + minor: 7 + contract: v1alpha3 + - major: 0 + minor: 8 + contract: v1alpha4 + - major: 1 + minor: 7 + contract: v1beta1 + - major: 1 + minor: 8 + contract: v1beta1