Fixing and simplifying clusterctl upgrade tests
Signed-off-by: muhammad adil ghaffar <[email protected]>
adilGhaffarDev committed Mar 28, 2024
1 parent 265d4e4 commit ff087c2
Showing 2 changed files with 149 additions and 113 deletions.
79 changes: 45 additions & 34 deletions test/e2e/config/e2e_conf.yaml
@@ -18,78 +18,89 @@ providers:
- name: cluster-api
type: CoreProvider
versions:
- name: ${CAPI_FROM_RELEASE}
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/${CAPI_FROM_RELEASE}/core-components.yaml"
- name: v1.5.7
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.7/core-components.yaml"
type: "url"
contract: ${CONTRACT_FROM}
contract: v1beta1
replacements:
- old: --metrics-addr=127.0.0.1:8080
new: --metrics-addr=:8080
files:
- sourcePath: "../data/shared/${CAPI_FROM_RELEASE:0:4}/metadata.yaml"
- name: ${CAPI_TO_RELEASE}
- sourcePath: "../data/shared/v1.5/metadata.yaml"
- name: v1.4.9
# Use manifest from source files
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/${CAPI_TO_RELEASE}/core-components.yaml"
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/core-components.yaml"
type: "url"
contract: ${CONTRACT_TO}
contract: v1beta1
files:
- sourcePath: "../data/shared/${CAPI_TO_RELEASE:0:4}/metadata.yaml"
- sourcePath: "../data/shared/v1.4/metadata.yaml"
replacements:
- old: "--leader-elect"
new: "--leader-elect=false"
- old: --metrics-addr=127.0.0.1:8080
new: --metrics-addr=:8080
- name: kubeadm
type: BootstrapProvider
versions:
- name: ${CAPI_FROM_RELEASE} # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only.
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/${CAPI_FROM_RELEASE}/bootstrap-components.yaml"
- name: v1.5.7
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.7/bootstrap-components.yaml"
type: "url"
contract: ${CONTRACT_FROM}
contract: v1beta1
replacements:
- old: --metrics-addr=127.0.0.1:8080
new: --metrics-addr=:8080
files:
- sourcePath: "../data/shared/${CAPI_FROM_RELEASE:0:4}/metadata.yaml"
- name: ${CAPI_TO_RELEASE}
- sourcePath: "../data/shared/v1.5/metadata.yaml"
- name: v1.4.9
# Use manifest from source files
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/${CAPI_TO_RELEASE}/bootstrap-components.yaml"
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/bootstrap-components.yaml"
type: "url"
contract: ${CONTRACT_TO}
contract: v1beta1
files:
- sourcePath: "../data/shared/${CAPI_TO_RELEASE:0:4}/metadata.yaml"
- sourcePath: "../data/shared/v1.4/metadata.yaml"
replacements:
- old: "--leader-elect"
new: "--leader-elect=false"
- old: --metrics-addr=127.0.0.1:8080
new: --metrics-addr=:8080
- name: kubeadm
type: ControlPlaneProvider
versions:
- name: ${CAPI_FROM_RELEASE} # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only.
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/${CAPI_FROM_RELEASE}/control-plane-components.yaml"
- name: v1.5.7 # upgrade-from release for the v1.5 => current clusterctl upgrade test
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.7/control-plane-components.yaml"
type: "url"
contract: ${CONTRACT_FROM}
contract: v1beta1
replacements:
- old: --metrics-addr=127.0.0.1:8080
new: --metrics-addr=:8080
files:
- sourcePath: "../data/shared/${CAPI_FROM_RELEASE:0:4}/metadata.yaml"
- name: ${CAPI_TO_RELEASE}
- sourcePath: "../data/shared/v1.5/metadata.yaml"
- name: v1.4.9
# Use manifest from source files
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/${CAPI_TO_RELEASE}/control-plane-components.yaml"
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/control-plane-components.yaml"
type: "url"
contract: ${CONTRACT_TO}
contract: v1beta1
files:
- sourcePath: "../data/shared/${CAPI_TO_RELEASE:0:4}/metadata.yaml"
- sourcePath: "../data/shared/$v1.4/metadata.yaml"
replacements:
- old: "--leader-elect"
new: "--leader-elect=false"
- old: --metrics-addr=127.0.0.1:8080
new: --metrics-addr=:8080
- name: metal3
type: InfrastructureProvider
versions:
- name: ${CAPM3_FROM_RELEASE}
value: "https://github.com/metal3-io/cluster-api-provider-metal3/releases/download/${CAPM3_FROM_RELEASE}/infrastructure-components.yaml"
- name: v1.5.3
value: "https://github.com/metal3-io/cluster-api-provider-metal3/releases/download/v1.5.3/infrastructure-components.yaml"
type: "url"
contract: v1beta1
files:
- sourcePath: "../data/infrastructure-metal3/v1.5/metadata.yaml"
targetName: "metadata.yaml"
- sourcePath: "../_out/cluster-template-ubuntu.yaml"
targetName: "cluster-template-ubuntu.yaml"
- sourcePath: "../_out/cluster-template-upgrade-workload.yaml"
targetName: "cluster-template-upgrade-workload.yaml"
- name: v1.4.4
value: "https://github.com/metal3-io/cluster-api-provider-metal3/releases/download/v1.4.4/infrastructure-components.yaml"
type: "url"
contract: ${CONTRACT_FROM}
contract: v1beta1
files:
- sourcePath: "../data/infrastructure-metal3/${CAPM3_FROM_RELEASE:0:4}/metadata.yaml"
- sourcePath: "../data/infrastructure-metal3/v1.4/metadata.yaml"
targetName: "metadata.yaml"
- sourcePath: "../_out/cluster-template-ubuntu.yaml"
targetName: "cluster-template-ubuntu.yaml"
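The YAML change above pins the provider versions directly in e2e_conf.yaml (cluster-api/kubeadm v1.5.7 and v1.4.9, metal3 v1.5.3 and v1.4.4) instead of substituting ${CAPI_FROM_RELEASE}, ${CAPI_TO_RELEASE} and ${CONTRACT_FROM}/${CONTRACT_TO} at runtime, so the upgrade matrix is readable from the file itself. Below is a minimal sketch of how such a config is typically loaded by CAPI-based e2e suites; the exact LoadE2EConfig signature can differ between cluster-api versions, and the loadE2EConfig wrapper name is illustrative, not part of this commit.

package e2e

import (
	"context"

	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
)

// loadE2EConfig parses e2e_conf.yaml. With the provider versions hard-coded,
// only variables still referenced as ${...} in the file (for example
// INIT_WITH_KUBERNETES_VERSION) have to resolve; CAPI_FROM_RELEASE and
// CONTRACT_FROM no longer need to be exported before a run.
func loadE2EConfig(ctx context.Context, configPath string) *clusterctl.E2EConfig {
	return clusterctl.LoadE2EConfig(ctx, clusterctl.LoadE2EConfigInput{
		ConfigPath: configPath, // e.g. "test/e2e/config/e2e_conf.yaml"
	})
}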
183 changes: 104 additions & 79 deletions test/e2e/upgrade_clusterctl_test.go
@@ -11,110 +11,122 @@ import (
bmov1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/cluster-api/cmd/clusterctl/client/config"
capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
framework "sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/controller-runtime/pkg/client"
)

const workDir = "/opt/metal3-dev-env/"

var _ = Describe(fmt.Sprintf("When testing cluster upgrade from releases %s > current [clusterctl-upgrade]", os.Getenv("CAPM3_FROM_RELEASE")), func() {
var _ = Describe(fmt.Sprintf("When testing cluster upgrade from releases (v1.5=>current) [clusterctl-upgrade]", os.Getenv("CAPM3_FROM_RELEASE")), func() {
BeforeEach(func() {
osType := strings.ToLower(os.Getenv("OS"))
Expect(osType).ToNot(Equal(""))
validateGlobals(specName)
imageURL, imageChecksum := EnsureImage(e2eConfig.GetVariable("INIT_WITH_KUBERNETES_VERSION"))
imageURL, imageChecksum := EnsureImage(e2eConfig.GetVariable("v1.28.1"))
os.Setenv("IMAGE_RAW_CHECKSUM", imageChecksum)
os.Setenv("IMAGE_RAW_URL", imageURL)
// We need to override clusterctl apply log folder to avoid getting our credentials exposed.
clusterctlLogFolder = filepath.Join(os.TempDir(), "clusters", bootstrapClusterProxy.GetName())
})
Releasev1 := strings.Contains(os.Getenv("CAPM3_FROM_RELEASE"), "v1")
if Releasev1 {
capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput {
return capi_e2e.ClusterctlUpgradeSpecInput{
E2EConfig: e2eConfig,
ClusterctlConfigPath: clusterctlConfigPath,
BootstrapClusterProxy: bootstrapClusterProxy,
ArtifactFolder: artifactFolder,
SkipCleanup: skipCleanup,
InitWithCoreProvider: fmt.Sprintf("cluster-api:%s", os.Getenv("CAPI_FROM_RELEASE")),
InitWithBootstrapProviders: []string{fmt.Sprintf("kubeadm:%s", os.Getenv("CAPI_FROM_RELEASE"))},
InitWithControlPlaneProviders: []string{fmt.Sprintf("kubeadm:%s", os.Getenv("CAPI_FROM_RELEASE"))},
InitWithInfrastructureProviders: []string{fmt.Sprintf("metal3:%s", os.Getenv("CAPM3_FROM_RELEASE"))},
InitWithKubernetesVersion: e2eConfig.GetVariable("INIT_WITH_KUBERNETES_VERSION"),
WorkloadKubernetesVersion: e2eConfig.GetVariable("INIT_WITH_KUBERNETES_VERSION"),
InitWithBinary: e2eConfig.GetVariable("INIT_WITH_BINARY"),
PreInit: func(clusterProxy framework.ClusterProxy) {
preInitFunc(clusterProxy)
// Override capi/capm3 versions exported in preInit
os.Setenv("CAPI_VERSION", "v1beta1")
os.Setenv("CAPM3_VERSION", "v1beta1")
os.Setenv("KUBECONFIG_BOOTSTRAP", bootstrapClusterProxy.GetKubeconfigPath())
},
PreWaitForCluster: preWaitForCluster,
PreUpgrade: preUpgrade,
PreCleanupManagementCluster: preCleanupManagementCluster,
MgmtFlavor: osType,
WorkloadFlavor: osType,
}
capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput {
return capi_e2e.ClusterctlUpgradeSpecInput{
E2EConfig: e2eConfig,
ClusterctlConfigPath: clusterctlConfigPath,
BootstrapClusterProxy: bootstrapClusterProxy,
ArtifactFolder: artifactFolder,
SkipCleanup: skipCleanup,
InitWithCoreProvider: "cluster-api:v1.5.7",
InitWithBootstrapProviders: []string{"kubeadm:v1.5.7"},
InitWithControlPlaneProviders: []string{"kubeadm:v1.5.7"},
InitWithInfrastructureProviders: []string{"metal3:v1.5.3"},
InitWithKubernetesVersion: e2eConfig.GetVariable("INIT_WITH_KUBERNETES_VERSION"),
WorkloadKubernetesVersion: e2eConfig.GetVariable("INIT_WITH_KUBERNETES_VERSION"),
InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.7/clusterctl-{OS}-{ARCH}",
PreInit: func(clusterProxy framework.ClusterProxy) {
preInitFunc(clusterProxy)
// Override capi/capm3 versions exported in preInit
os.Setenv("CAPI_VERSION", "v1beta1")
os.Setenv("CAPM3_VERSION", "v1beta1")
os.Setenv("KUBECONFIG_BOOTSTRAP", bootstrapClusterProxy.GetKubeconfigPath())
},
PreWaitForCluster: preWaitForCluster,
PreUpgrade: preUpgrade,
PreCleanupManagementCluster: preCleanupManagementCluster,
MgmtFlavor: osType,
WorkloadFlavor: osType,
}
})
AfterEach(func() {
// Recreate bmh that was used in capi namespace in metal3
//#nosec G204 -- We need to pass in the file name here.
cmd := exec.Command("bash", "-c", "kubectl apply -f bmhosts_crs.yaml -n metal3")
cmd.Dir = workDir
output, err := cmd.CombinedOutput()
Logf("Applying bmh to metal3 namespace : \n %v", string(output))
Expect(err).To(BeNil())
// wait for all bmh to become available
bootstrapClient := bootstrapClusterProxy.GetClient()
ListBareMetalHosts(ctx, bootstrapClient, client.InNamespace(namespace))
WaitForNumBmhInState(ctx, bmov1alpha1.StateAvailable, WaitForNumInput{
Client: bootstrapClient,
Options: []client.ListOption{client.InNamespace(namespace)},
Replicas: 5,
Intervals: e2eConfig.GetIntervals(specName, "wait-bmh-available"),
})
} else {

isPreRelease := strings.Contains(os.Getenv("CAPI_TO_RELEASE"), "-")
ListBareMetalHosts(ctx, bootstrapClient, client.InNamespace(namespace))
})
})

if isPreRelease {
capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput {
return capi_e2e.ClusterctlUpgradeSpecInput{
E2EConfig: e2eConfig,
ClusterctlConfigPath: clusterctlConfigPath,
BootstrapClusterProxy: bootstrapClusterProxy,
ArtifactFolder: artifactFolder,
SkipCleanup: skipCleanup,
InitWithProvidersContract: "v1alpha4",
InitWithCoreProvider: fmt.Sprintf("capi-system/cluster-api:%s", os.Getenv("CAPI_TO_RELEASE")),
InitWithBootstrapProviders: []string{fmt.Sprintf("capi-kubeadm-bootstrap-system/kubeadm:%s", os.Getenv("CAPI_TO_RELEASE"))},
InitWithControlPlaneProviders: []string{fmt.Sprintf("capi-kubeadm-control-plane-system/kubeadm:%s", os.Getenv("CAPI_TO_RELEASE"))},
InitWithInfrastructureProviders: []string{fmt.Sprintf("capm3-system/metal3:%s", os.Getenv("CAPM3_TO_RELEASE"))},
InitWithBinary: e2eConfig.GetVariable("INIT_WITH_BINARY"),
InitWithKubernetesVersion: e2eConfig.GetVariable("INIT_WITH_KUBERNETES_VERSION"),
WorkloadKubernetesVersion: e2eConfig.GetVariable("INIT_WITH_KUBERNETES_VERSION"),
PreInit: preInitFunc,
PreWaitForCluster: preWaitForCluster,
PreUpgrade: preUpgrade,
PreCleanupManagementCluster: preCleanupManagementCluster,
MgmtFlavor: osType,
WorkloadFlavor: osType,
}
})
} else {
capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput {
return capi_e2e.ClusterctlUpgradeSpecInput{
E2EConfig: e2eConfig,
ClusterctlConfigPath: clusterctlConfigPath,
BootstrapClusterProxy: bootstrapClusterProxy,
ArtifactFolder: artifactFolder,
SkipCleanup: skipCleanup,
InitWithProvidersContract: "v1alpha4",
InitWithBinary: e2eConfig.GetVariable("INIT_WITH_BINARY"),
PreInit: preInitFunc,
PreWaitForCluster: preWaitForCluster,
PreUpgrade: preUpgrade,
PreCleanupManagementCluster: preCleanupManagementCluster,
MgmtFlavor: osType,
WorkloadFlavor: osType,
}
})
var _ = Describe("When testing cluster upgrade from releases (v1.4=>current) [clusterctl-upgrade]", func() {
BeforeEach(func() {
osType := strings.ToLower(os.Getenv("OS"))
Expect(osType).ToNot(Equal(""))
validateGlobals(specName)
imageURL, imageChecksum := EnsureImage(e2eConfig.GetVariable("v1.28.1"))
os.Setenv("IMAGE_RAW_CHECKSUM", imageChecksum)
os.Setenv("IMAGE_RAW_URL", imageURL)
// We need to override clusterctl apply log folder to avoid getting our credentials exposed.
clusterctlLogFolder = filepath.Join(os.TempDir(), "clusters", bootstrapClusterProxy.GetName())
})
capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput {
return capi_e2e.ClusterctlUpgradeSpecInput{
E2EConfig: e2eConfig,
ClusterctlConfigPath: clusterctlConfigPath,
BootstrapClusterProxy: bootstrapClusterProxy,
ArtifactFolder: artifactFolder,
SkipCleanup: skipCleanup,
InitWithCoreProvider: "cluster-api:v1.4.9",
InitWithBootstrapProviders: []string{"kubeadm:v1.4.9"},
InitWithControlPlaneProviders: []string{"kubeadm:v1.4.9"},
InitWithInfrastructureProviders: []string{"metal3:v1.4.4"},
InitWithKubernetesVersion: e2eConfig.GetVariable("INIT_WITH_KUBERNETES_VERSION"),
WorkloadKubernetesVersion: e2eConfig.GetVariable("INIT_WITH_KUBERNETES_VERSION"),
InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/clusterctl-{OS}-{ARCH}",
PreInit: func(clusterProxy framework.ClusterProxy) {
preInitFunc(clusterProxy)
// Override capi/capm3 versions exported in preInit
os.Setenv("CAPI_VERSION", "v1beta1")
os.Setenv("CAPM3_VERSION", "v1beta1")
os.Setenv("KUBECONFIG_BOOTSTRAP", bootstrapClusterProxy.GetKubeconfigPath())
},
PreWaitForCluster: preWaitForCluster,
PreUpgrade: preUpgrade,
PreCleanupManagementCluster: preCleanupManagementCluster,
MgmtFlavor: osType,
WorkloadFlavor: osType,
}
}
})
AfterEach(func() {
// Recreate bmh that was used in capi namespace in metal3
//#nosec G204 -- We need to pass in the file name here.
cmd := exec.Command("bash", "-c", "kubectl apply -f bmhosts_crs.yaml -n metal3")
cmd.Dir = workDir
output, err := cmd.CombinedOutput()
Logf("Applying bmh to metal3 namespace : \n %v", string(output))
Expect(err).To(BeNil())
Expect(err).ToNot(HaveOccurred())
// wait for all bmh to become available
bootstrapClient := bootstrapClusterProxy.GetClient()
ListBareMetalHosts(ctx, bootstrapClient, client.InNamespace(namespace))
@@ -191,12 +203,25 @@ func preWaitForCluster(clusterProxy framework.ClusterProxy, clusterNamespace str
// it installs certManager, BMO and Ironic and overrides the default IPs for the workload cluster.
func preInitFunc(clusterProxy framework.ClusterProxy) {
installCertManager := func(clusterProxy framework.ClusterProxy) {
certManagerLink := fmt.Sprintf("https://github.com/cert-manager/cert-manager/releases/download/%s/cert-manager.yaml", e2eConfig.GetVariable("CERT_MANAGER_RELEASE"))
certManagerLink := fmt.Sprintf("https://github.com/cert-manager/cert-manager/releases/download/%s/cert-manager.yaml", config.CertManagerDefaultVersion)
err := DownloadFile("/tmp/certManager.yaml", certManagerLink)
Expect(err).To(BeNil(), "Unable to download certmanager manifest")
certManagerYaml, err := os.ReadFile("/tmp/certManager.yaml")
Expect(err).ShouldNot(HaveOccurred())
Expect(clusterProxy.Apply(ctx, certManagerYaml)).ShouldNot(HaveOccurred())

By("Wait for cert-manager pods to be available")
deploymentNameList := []string{}
deploymentNameList = append(deploymentNameList, "cert-manager", "cert-manager-cainjector", "cert-manager-webhook")
clientSet := clusterProxy.GetClientSet()
for _, name := range deploymentNameList {
deployment, err := clientSet.AppsV1().Deployments("cert-manager").Get(ctx, name, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred(), "Unable to get the deployment %s in namespace %s \n error message: %s", name, "cert-manager", err)
framework.WaitForDeploymentsAvailable(ctx, framework.WaitForDeploymentsAvailableInput{
Getter: clusterProxy.GetClient(),
Deployment: deployment,
}, e2eConfig.GetIntervals(specName, "wait-deployment")...)
}
}

By("Fetch manifest for bootstrap cluster")
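Both Describe blocks above register an identical AfterEach that re-applies bmhosts_crs.yaml and waits for the BareMetalHosts to return to the available state. Below is a sketch of how that shared cleanup could be factored into one helper, assuming the suite's existing helpers (Logf, ListBareMetalHosts, WaitForNumBmhInState, WaitForNumInput) and package-level variables (workDir, namespace, specName, e2eConfig); the helper name rebootstrapBMHs is hypothetical and not part of this commit.

package e2e

import (
	"context"
	"os/exec"

	bmov1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
	. "github.com/onsi/gomega"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// rebootstrapBMHs re-registers the BareMetalHosts consumed by an upgrade spec
// and blocks until all of them report the available state again.
func rebootstrapBMHs(ctx context.Context, bootstrapClient client.Client) {
	cmd := exec.Command("bash", "-c", "kubectl apply -f bmhosts_crs.yaml -n metal3")
	cmd.Dir = workDir
	output, err := cmd.CombinedOutput()
	Logf("Applying bmh to metal3 namespace : \n %v", string(output))
	Expect(err).ToNot(HaveOccurred())

	WaitForNumBmhInState(ctx, bmov1alpha1.StateAvailable, WaitForNumInput{
		Client:    bootstrapClient,
		Options:   []client.ListOption{client.InNamespace(namespace)},
		Replicas:  5,
		Intervals: e2eConfig.GetIntervals(specName, "wait-bmh-available"),
	})
	ListBareMetalHosts(ctx, bootstrapClient, client.InNamespace(namespace))
}

Each AfterEach body would then reduce to a single call such as rebootstrapBMHs(ctx, bootstrapClusterProxy.GetClient()).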
