From 3fc76d8373f8e8658d379dc54443524bdfe2c177 Mon Sep 17 00:00:00 2001 From: muhammad adil ghaffar Date: Tue, 20 Feb 2024 12:50:39 +0200 Subject: [PATCH] Fixing and simplifying clusterctl upgrade tests Signed-off-by: muhammad adil ghaffar --- test/e2e/config/e2e_conf.yaml | 79 ++++++------ test/e2e/upgrade_clusterctl_test.go | 183 ++++++++++++++++------------ 2 files changed, 149 insertions(+), 113 deletions(-) diff --git a/test/e2e/config/e2e_conf.yaml b/test/e2e/config/e2e_conf.yaml index 2604e5a0ab..ad244801ef 100644 --- a/test/e2e/config/e2e_conf.yaml +++ b/test/e2e/config/e2e_conf.yaml @@ -18,78 +18,89 @@ providers: - name: cluster-api type: CoreProvider versions: - - name: ${CAPI_FROM_RELEASE} - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/${CAPI_FROM_RELEASE}/core-components.yaml" + - name: v1.5.7 + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.7/core-components.yaml" type: "url" - contract: ${CONTRACT_FROM} + contract: v1beta1 replacements: - old: --metrics-addr=127.0.0.1:8080 new: --metrics-addr=:8080 files: - - sourcePath: "../data/shared/${CAPI_FROM_RELEASE:0:4}/metadata.yaml" - - name: ${CAPI_TO_RELEASE} + - sourcePath: "../data/shared/v1.5/metadata.yaml" + - name: v1.4.9 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/${CAPI_TO_RELEASE}/core-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/core-components.yaml" type: "url" - contract: ${CONTRACT_TO} + contract: v1beta1 files: - - sourcePath: "../data/shared/${CAPI_TO_RELEASE:0:4}/metadata.yaml" + - sourcePath: "../data/shared/v1.4/metadata.yaml" replacements: - - old: "--leader-elect" - new: "--leader-elect=false" + - old: --metrics-addr=127.0.0.1:8080 + new: --metrics-addr=:8080 - name: kubeadm type: BootstrapProvider versions: - - name: ${CAPI_FROM_RELEASE} # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/${CAPI_FROM_RELEASE}/bootstrap-components.yaml" + - name: v1.5.7 + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.7/bootstrap-components.yaml" type: "url" - contract: ${CONTRACT_FROM} + contract: v1beta1 replacements: - old: --metrics-addr=127.0.0.1:8080 new: --metrics-addr=:8080 files: - - sourcePath: "../data/shared/${CAPI_FROM_RELEASE:0:4}/metadata.yaml" - - name: ${CAPI_TO_RELEASE} + - sourcePath: "../data/shared/v1.5/metadata.yaml" + - name: v1.4.9 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/${CAPI_TO_RELEASE}/bootstrap-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/bootstrap-components.yaml" type: "url" - contract: ${CONTRACT_TO} + contract: v1beta1 files: - - sourcePath: "../data/shared/${CAPI_TO_RELEASE:0:4}/metadata.yaml" + - sourcePath: "../data/shared/v1.4/metadata.yaml" replacements: - - old: "--leader-elect" - new: "--leader-elect=false" + - old: --metrics-addr=127.0.0.1:8080 + new: --metrics-addr=:8080 - name: kubeadm type: ControlPlaneProvider versions: - - name: ${CAPI_FROM_RELEASE} # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only. 
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/${CAPI_FROM_RELEASE}/control-plane-components.yaml" + - name: v1.5.7 # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.7/control-plane-components.yaml" type: "url" - contract: ${CONTRACT_FROM} + contract: v1beta1 replacements: - old: --metrics-addr=127.0.0.1:8080 new: --metrics-addr=:8080 files: - - sourcePath: "../data/shared/${CAPI_FROM_RELEASE:0:4}/metadata.yaml" - - name: ${CAPI_TO_RELEASE} + - sourcePath: "../data/shared/v1.5/metadata.yaml" + - name: v1.4.9 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/${CAPI_TO_RELEASE}/control-plane-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/control-plane-components.yaml" type: "url" - contract: ${CONTRACT_TO} + contract: v1beta1 files: - - sourcePath: "../data/shared/${CAPI_TO_RELEASE:0:4}/metadata.yaml" + - sourcePath: "../data/shared/$v1.4/metadata.yaml" replacements: - - old: "--leader-elect" - new: "--leader-elect=false" + - old: --metrics-addr=127.0.0.1:8080 + new: --metrics-addr=:8080 - name: metal3 type: InfrastructureProvider versions: - - name: ${CAPM3_FROM_RELEASE} - value: "https://github.com/metal3-io/cluster-api-provider-metal3/releases/download/${CAPM3_FROM_RELEASE}/infrastructure-components.yaml" + - name: v1.5.3 + value: "https://github.com/metal3-io/cluster-api-provider-metal3/releases/download/v1.5.3/infrastructure-components.yaml" + type: "url" + contract: v1beta1 + files: + - sourcePath: "../data/infrastructure-metal3/v1.5/metadata.yaml" + targetName: "metadata.yaml" + - sourcePath: "../_out/cluster-template-ubuntu.yaml" + targetName: "cluster-template-ubuntu.yaml" + - sourcePath: "../_out/cluster-template-upgrade-workload.yaml" + targetName: "cluster-template-upgrade-workload.yaml" + - name: v1.4.4 + value: "https://github.com/metal3-io/cluster-api-provider-metal3/releases/download/v1.4.4/infrastructure-components.yaml" type: "url" - contract: ${CONTRACT_FROM} + contract: v1beta1 files: - - sourcePath: "../data/infrastructure-metal3/${CAPM3_FROM_RELEASE:0:4}/metadata.yaml" + - sourcePath: "../data/infrastructure-metal3/v1.4/metadata.yaml" targetName: "metadata.yaml" - sourcePath: "../_out/cluster-template-ubuntu.yaml" targetName: "cluster-template-ubuntu.yaml" diff --git a/test/e2e/upgrade_clusterctl_test.go b/test/e2e/upgrade_clusterctl_test.go index af84c9b13f..0c18ae90a2 100644 --- a/test/e2e/upgrade_clusterctl_test.go +++ b/test/e2e/upgrade_clusterctl_test.go @@ -11,6 +11,8 @@ import ( bmov1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" framework "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/controller-runtime/pkg/client" @@ -18,95 +20,105 @@ import ( const workDir = "/opt/metal3-dev-env/" -var _ = Describe(fmt.Sprintf("When testing cluster upgrade from releases %s > current [clusterctl-upgrade]", os.Getenv("CAPM3_FROM_RELEASE")), func() { +var _ = Describe("When testing cluster upgrade from releases (v1.5=>current) [clusterctl-upgrade]"), func() { BeforeEach(func() { osType := strings.ToLower(os.Getenv("OS")) Expect(osType).ToNot(Equal("")) validateGlobals(specName) - imageURL, imageChecksum := EnsureImage(e2eConfig.GetVariable("INIT_WITH_KUBERNETES_VERSION")) + imageURL, imageChecksum := EnsureImage(e2eConfig.GetVariable("v1.28.1")) os.Setenv("IMAGE_RAW_CHECKSUM", imageChecksum) os.Setenv("IMAGE_RAW_URL", imageURL) // We need to override clusterctl apply log folder to avoid getting our credentials exposed. clusterctlLogFolder = filepath.Join(os.TempDir(), "clusters", bootstrapClusterProxy.GetName()) }) - Releasev1 := strings.Contains(os.Getenv("CAPM3_FROM_RELEASE"), "v1") - if Releasev1 { - capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput { - return capi_e2e.ClusterctlUpgradeSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - InitWithCoreProvider: fmt.Sprintf("cluster-api:%s", os.Getenv("CAPI_FROM_RELEASE")), - InitWithBootstrapProviders: []string{fmt.Sprintf("kubeadm:%s", os.Getenv("CAPI_FROM_RELEASE"))}, - InitWithControlPlaneProviders: []string{fmt.Sprintf("kubeadm:%s", os.Getenv("CAPI_FROM_RELEASE"))}, - InitWithInfrastructureProviders: []string{fmt.Sprintf("metal3:%s", os.Getenv("CAPM3_FROM_RELEASE"))}, - InitWithKubernetesVersion: e2eConfig.GetVariable("INIT_WITH_KUBERNETES_VERSION"), - WorkloadKubernetesVersion: e2eConfig.GetVariable("INIT_WITH_KUBERNETES_VERSION"), - InitWithBinary: e2eConfig.GetVariable("INIT_WITH_BINARY"), - PreInit: func(clusterProxy framework.ClusterProxy) { - preInitFunc(clusterProxy) - // Override capi/capm3 versions exported in preInit - os.Setenv("CAPI_VERSION", "v1beta1") - os.Setenv("CAPM3_VERSION", "v1beta1") - os.Setenv("KUBECONFIG_BOOTSTRAP", bootstrapClusterProxy.GetKubeconfigPath()) - }, - PreWaitForCluster: preWaitForCluster, - PreUpgrade: preUpgrade, - PreCleanupManagementCluster: preCleanupManagementCluster, - MgmtFlavor: osType, - WorkloadFlavor: osType, - } + capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput { + return capi_e2e.ClusterctlUpgradeSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InitWithCoreProvider: "cluster-api:v1.5.7", + InitWithBootstrapProviders: []string{"kubeadm:v1.5.7"}, + InitWithControlPlaneProviders: []string{"kubeadm:v1.5.7"}, + InitWithInfrastructureProviders: []string{"metal3:v1.5.3"}, + InitWithKubernetesVersion: e2eConfig.GetVariable("INIT_WITH_KUBERNETES_VERSION"), + WorkloadKubernetesVersion: e2eConfig.GetVariable("INIT_WITH_KUBERNETES_VERSION"), + InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.7/clusterctl-{OS}-{ARCH}", + PreInit: func(clusterProxy framework.ClusterProxy) { + 
preInitFunc(clusterProxy) + // Override capi/capm3 versions exported in preInit + os.Setenv("CAPI_VERSION", "v1beta1") + os.Setenv("CAPM3_VERSION", "v1beta1") + os.Setenv("KUBECONFIG_BOOTSTRAP", bootstrapClusterProxy.GetKubeconfigPath()) + }, + PreWaitForCluster: preWaitForCluster, + PreUpgrade: preUpgrade, + PreCleanupManagementCluster: preCleanupManagementCluster, + MgmtFlavor: osType, + WorkloadFlavor: osType, + } + }) + AfterEach(func() { + // Recreate bmh that was used in capi namespace in metal3 + //#nosec G204 -- We need to pass in the file name here. + cmd := exec.Command("bash", "-c", "kubectl apply -f bmhosts_crs.yaml -n metal3") + cmd.Dir = workDir + output, err := cmd.CombinedOutput() + Logf("Applying bmh to metal3 namespace : \n %v", string(output)) + Expect(err).To(BeNil()) + // wait for all bmh to become available + bootstrapClient := bootstrapClusterProxy.GetClient() + ListBareMetalHosts(ctx, bootstrapClient, client.InNamespace(namespace)) + WaitForNumBmhInState(ctx, bmov1alpha1.StateAvailable, WaitForNumInput{ + Client: bootstrapClient, + Options: []client.ListOption{client.InNamespace(namespace)}, + Replicas: 5, + Intervals: e2eConfig.GetIntervals(specName, "wait-bmh-available"), }) - } else { - - isPreRelease := strings.Contains(os.Getenv("CAPI_TO_RELEASE"), "-") + ListBareMetalHosts(ctx, bootstrapClient, client.InNamespace(namespace)) + }) +}) - if isPreRelease { - capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput { - return capi_e2e.ClusterctlUpgradeSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - InitWithProvidersContract: "v1alpha4", - InitWithCoreProvider: fmt.Sprintf("capi-system/cluster-api:%s", os.Getenv("CAPI_TO_RELEASE")), - InitWithBootstrapProviders: []string{fmt.Sprintf("capi-kubeadm-bootstrap-system/kubeadm:%s", os.Getenv("CAPI_TO_RELEASE"))}, - InitWithControlPlaneProviders: []string{fmt.Sprintf("capi-kubeadm-control-plane-system/kubeadm:%s", os.Getenv("CAPI_TO_RELEASE"))}, - InitWithInfrastructureProviders: []string{fmt.Sprintf("capm3-system/metal3:%s", os.Getenv("CAPM3_TO_RELEASE"))}, - InitWithBinary: e2eConfig.GetVariable("INIT_WITH_BINARY"), - InitWithKubernetesVersion: e2eConfig.GetVariable("INIT_WITH_KUBERNETES_VERSION"), - WorkloadKubernetesVersion: e2eConfig.GetVariable("INIT_WITH_KUBERNETES_VERSION"), - PreInit: preInitFunc, - PreWaitForCluster: preWaitForCluster, - PreUpgrade: preUpgrade, - PreCleanupManagementCluster: preCleanupManagementCluster, - MgmtFlavor: osType, - WorkloadFlavor: osType, - } - }) - } else { - capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput { - return capi_e2e.ClusterctlUpgradeSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - InitWithProvidersContract: "v1alpha4", - InitWithBinary: e2eConfig.GetVariable("INIT_WITH_BINARY"), - PreInit: preInitFunc, - PreWaitForCluster: preWaitForCluster, - PreUpgrade: preUpgrade, - PreCleanupManagementCluster: preCleanupManagementCluster, - MgmtFlavor: osType, - WorkloadFlavor: osType, - } - }) +var _ = Describe("When testing cluster upgrade from releases (v1.4=>current) [clusterctl-upgrade]", func() { + BeforeEach(func() { + osType := strings.ToLower(os.Getenv("OS")) + Expect(osType).ToNot(Equal("")) + validateGlobals(specName) + imageURL, 
imageChecksum := EnsureImage("v1.28.1") + os.Setenv("IMAGE_RAW_CHECKSUM", imageChecksum) + os.Setenv("IMAGE_RAW_URL", imageURL) + // We need to override clusterctl apply log folder to avoid getting our credentials exposed. + clusterctlLogFolder = filepath.Join(os.TempDir(), "clusters", bootstrapClusterProxy.GetName()) + }) + capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput { + return capi_e2e.ClusterctlUpgradeSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InitWithCoreProvider: "cluster-api:v1.4.9", + InitWithBootstrapProviders: []string{"kubeadm:v1.4.9"}, + InitWithControlPlaneProviders: []string{"kubeadm:v1.4.9"}, + InitWithInfrastructureProviders: []string{"metal3:v1.4.4"}, + InitWithKubernetesVersion: e2eConfig.GetVariable("INIT_WITH_KUBERNETES_VERSION"), + WorkloadKubernetesVersion: e2eConfig.GetVariable("INIT_WITH_KUBERNETES_VERSION"), + InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/clusterctl-{OS}-{ARCH}", + PreInit: func(clusterProxy framework.ClusterProxy) { + preInitFunc(clusterProxy) + // Override capi/capm3 versions exported in preInit + os.Setenv("CAPI_VERSION", "v1beta1") + os.Setenv("CAPM3_VERSION", "v1beta1") + os.Setenv("KUBECONFIG_BOOTSTRAP", bootstrapClusterProxy.GetKubeconfigPath()) + }, + PreWaitForCluster: preWaitForCluster, + PreUpgrade: preUpgrade, + PreCleanupManagementCluster: preCleanupManagementCluster, + MgmtFlavor: osType, + WorkloadFlavor: osType, } - } + }) AfterEach(func() { // Recreate bmh that was used in capi namespace in metal3 //#nosec G204 -- We need to pass in the file name here. @@ -114,7 +126,7 @@ var _ = Describe(fmt.Sprintf("When testing cluster upgrade from releases %s > cu cmd.Dir = workDir output, err := cmd.CombinedOutput() Logf("Applying bmh to metal3 namespace : \n %v", string(output)) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) // wait for all bmh to become available bootstrapClient := bootstrapClusterProxy.GetClient() ListBareMetalHosts(ctx, bootstrapClient, client.InNamespace(namespace)) @@ -191,12 +203,25 @@ func preWaitForCluster(clusterProxy framework.ClusterProxy, clusterNamespace str // it installs certManager, BMO and Ironic and overrides the default IPs for the workload cluster.
func preInitFunc(clusterProxy framework.ClusterProxy) { installCertManager := func(clusterProxy framework.ClusterProxy) { - certManagerLink := fmt.Sprintf("https://github.com/cert-manager/cert-manager/releases/download/%s/cert-manager.yaml", e2eConfig.GetVariable("CERT_MANAGER_RELEASE")) + certManagerLink := fmt.Sprintf("https://github.com/cert-manager/cert-manager/releases/download/%s/cert-manager.yaml", config.CertManagerDefaultVersion) err := DownloadFile("/tmp/certManager.yaml", certManagerLink) Expect(err).To(BeNil(), "Unable to download certmanager manifest") certManagerYaml, err := os.ReadFile("/tmp/certManager.yaml") Expect(err).ShouldNot(HaveOccurred()) Expect(clusterProxy.Apply(ctx, certManagerYaml)).ShouldNot(HaveOccurred()) + + By("Wait for cert-manager pods to be available") + deploymentNameList := []string{} + deploymentNameList = append(deploymentNameList, "cert-manager", "cert-manager-cainjector", "cert-manager-webhook") + clientSet := clusterProxy.GetClientSet() + for _, name := range deploymentNameList { + deployment, err := clientSet.AppsV1().Deployments("cert-manager").Get(ctx, name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred(), "Unable to get the deployment %s in namespace %s \n error message: %s", name, "cert-manager", err) + framework.WaitForDeploymentsAvailable(ctx, framework.WaitForDeploymentsAvailableInput{ + Getter: clusterProxy.GetClient(), + Deployment: deployment, + }, e2eConfig.GetIntervals(specName, "wait-deployment")...) + } } By("Fetch manifest for bootstrap cluster")
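Note: the two Describe blocks in this patch are now identical except for the hardcoded CAPI/CAPM3 versions and the clusterctl binary URL. A possible follow-up simplification, sketched below and not part of this patch, is to build the shared ClusterctlUpgradeSpecInput from a small set of version strings. The helper upgradeSpecInput and the upgradeVersions struct are hypothetical names; the globals and hooks they reference (e2eConfig, clusterctlConfigPath, bootstrapClusterProxy, artifactFolder, skipCleanup, preInitFunc, preWaitForCluster, preUpgrade, preCleanupManagementCluster) are the ones already used in the tests above.

// Hypothetical helper, not part of this patch: collects the versions that
// differ between the v1.5=>current and v1.4=>current upgrade specs.
type upgradeVersions struct {
	capiVersion  string // e.g. "v1.5.7" or "v1.4.9"
	capm3Version string // e.g. "v1.5.3" or "v1.4.4"
}

// upgradeSpecInput builds the ClusterctlUpgradeSpecInput shared by both
// upgrade Describe blocks from the given versions and OS flavor.
func upgradeSpecInput(v upgradeVersions, osType string) capi_e2e.ClusterctlUpgradeSpecInput {
	return capi_e2e.ClusterctlUpgradeSpecInput{
		E2EConfig:                       e2eConfig,
		ClusterctlConfigPath:            clusterctlConfigPath,
		BootstrapClusterProxy:           bootstrapClusterProxy,
		ArtifactFolder:                  artifactFolder,
		SkipCleanup:                     skipCleanup,
		InitWithCoreProvider:            fmt.Sprintf("cluster-api:%s", v.capiVersion),
		InitWithBootstrapProviders:      []string{fmt.Sprintf("kubeadm:%s", v.capiVersion)},
		InitWithControlPlaneProviders:   []string{fmt.Sprintf("kubeadm:%s", v.capiVersion)},
		InitWithInfrastructureProviders: []string{fmt.Sprintf("metal3:%s", v.capm3Version)},
		InitWithKubernetesVersion:       e2eConfig.GetVariable("INIT_WITH_KUBERNETES_VERSION"),
		WorkloadKubernetesVersion:       e2eConfig.GetVariable("INIT_WITH_KUBERNETES_VERSION"),
		InitWithBinary:                  fmt.Sprintf("https://github.com/kubernetes-sigs/cluster-api/releases/download/%s/clusterctl-{OS}-{ARCH}", v.capiVersion),
		PreInit: func(clusterProxy framework.ClusterProxy) {
			preInitFunc(clusterProxy)
			// Override capi/capm3 versions exported in preInit.
			os.Setenv("CAPI_VERSION", "v1beta1")
			os.Setenv("CAPM3_VERSION", "v1beta1")
			os.Setenv("KUBECONFIG_BOOTSTRAP", bootstrapClusterProxy.GetKubeconfigPath())
		},
		PreWaitForCluster:           preWaitForCluster,
		PreUpgrade:                  preUpgrade,
		PreCleanupManagementCluster: preCleanupManagementCluster,
		MgmtFlavor:                  osType,
		WorkloadFlavor:              osType,
	}
}

With such a helper, each Describe block would reduce to a single call like capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput { return upgradeSpecInput(upgradeVersions{capiVersion: "v1.5.7", capm3Version: "v1.5.3"}, osType) }), which also avoids repeating the hardcoded URLs by hand.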