From a498ad57a1a591925d07e8069e3c7c3e8c991853 Mon Sep 17 00:00:00 2001 From: Mohammed Boukhalfa Date: Mon, 21 Oct 2024 14:26:24 +0300 Subject: [PATCH] Add Scalability test Signed-off-by: Mohammed Boukhalfa --- Makefile | 1 + docs/ip_reuse.md | 2 +- go.mod | 2 + scripts/ci-e2e.sh | 8 +- scripts/environment.sh | 8 + test/e2e/config/e2e_conf.yaml | 5 +- test/e2e/data/fkas/kustomization.yaml | 2 + test/e2e/data/fkas/resources.yaml | 77 +++++++ .../bases/cluster/cluster-with-kcp.yaml | 8 +- .../bases/cluster/md.yaml | 8 +- .../cluster-with-kcp.yaml | 8 +- .../bases/clusterclass-cluster/md.yaml | 8 +- .../bases/ippool/ippool.yaml | 4 +- .../kustomization.yaml | 3 + test/e2e/scalability_test.go | 204 ++++++++++++++++++ test/go.mod | 2 + test/go.sum | 4 +- 17 files changed, 330 insertions(+), 24 deletions(-) create mode 100644 test/e2e/data/fkas/kustomization.yaml create mode 100644 test/e2e/data/fkas/resources.yaml create mode 100644 test/e2e/data/infrastructure-metal3/cluster-template-centos-fake/kustomization.yaml create mode 100644 test/e2e/scalability_test.go diff --git a/Makefile b/Makefile index d498712e35..6f211cfd23 100644 --- a/Makefile +++ b/Makefile @@ -173,6 +173,7 @@ E2E_TEMPLATES_DIR ?= $(ROOT_DIR)/test/e2e/data/infrastructure-metal3 cluster-templates: $(KUSTOMIZE) ## Generate cluster templates $(KUSTOMIZE) build $(E2E_TEMPLATES_DIR)/cluster-template-ubuntu > $(E2E_OUT_DIR)/cluster-template-ubuntu.yaml $(KUSTOMIZE) build $(E2E_TEMPLATES_DIR)/cluster-template-centos > $(E2E_OUT_DIR)/cluster-template-centos.yaml + $(KUSTOMIZE) build $(E2E_TEMPLATES_DIR)/cluster-template-centos-fake > $(E2E_OUT_DIR)/cluster-template-centos-fake.yaml $(KUSTOMIZE) build $(E2E_TEMPLATES_DIR)/cluster-template-upgrade-workload > $(E2E_OUT_DIR)/cluster-template-upgrade-workload.yaml $(KUSTOMIZE) build $(E2E_TEMPLATES_DIR)/cluster-template-centos-md-remediation > $(E2E_OUT_DIR)/cluster-template-centos-md-remediation.yaml $(KUSTOMIZE) build $(E2E_TEMPLATES_DIR)/cluster-template-ubuntu-md-remediation > $(E2E_OUT_DIR)/cluster-template-ubuntu-md-remediation.yaml diff --git a/docs/ip_reuse.md b/docs/ip_reuse.md index 03f1dab899..e289e5af12 100644 --- a/docs/ip_reuse.md +++ b/docs/ip_reuse.md @@ -56,7 +56,7 @@ included) to the `preAllocations` field in the `IPPool`, i.e: apiVersion: ipam.metal3.io/v1alpha1 kind: IPPool metadata: - name: baremetalv4-pool + name: baremetalv4-pool-${CLUSTER_NAME} namespace: metal3 spec: clusterName: test1 diff --git a/go.mod b/go.mod index 466da14853..a9148ccd2d 100644 --- a/go.mod +++ b/go.mod @@ -27,6 +27,8 @@ require ( replace github.com/metal3-io/cluster-api-provider-metal3/api => ./api +replace sigs.k8s.io/cluster-api/test => github.com/Nordix/cluster-api/test v1.0.1-0.20241021093551-8634871d5229 + require ( github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect diff --git a/scripts/ci-e2e.sh b/scripts/ci-e2e.sh index 953298c6f0..8989281e93 100755 --- a/scripts/ci-e2e.sh +++ b/scripts/ci-e2e.sh @@ -20,8 +20,8 @@ source "${REPO_ROOT}/scripts/environment.sh" # Clone dev-env repo sudo mkdir -p ${WORKING_DIR} sudo chown "${USER}":"${USER}" ${WORKING_DIR} -M3_DEV_ENV_REPO="https://github.com/metal3-io/metal3-dev-env.git" -M3_DEV_ENV_BRANCH=main +M3_DEV_ENV_REPO="https://github.com/Nordix/metal3-dev-env.git" +M3_DEV_ENV_BRANCH=Integrate-fakeIPA/mohammed M3_DEV_ENV_PATH="${M3_DEV_ENV_PATH:-${WORKING_DIR}/metal3-dev-env}" clone_repo "${M3_DEV_ENV_REPO}" "${M3_DEV_ENV_BRANCH}" "${M3_DEV_ENV_PATH}" @@ 
-42,6 +42,10 @@ if [[ ${GINKGO_FOCUS:-} == "features" ]]; then mkdir -p "$CAPI_CONFIG_FOLDER" echo "ENABLE_BMH_NAME_BASED_PREALLOCATION: true" >"$CAPI_CONFIG_FOLDER/clusterctl.yaml" fi +# if running a scalability test run DevEnv with fakeIPA +if [[ ${GINKGO_FOCUS:-} == "scalability" ]]; then + echo 'export NODES_PLATFORM="fake"' >>"${M3_DEV_ENV_PATH}/config_${USER}.sh" +fi # Run make devenv to boot the source cluster pushd "${M3_DEV_ENV_PATH}" || exit 1 make diff --git a/scripts/environment.sh b/scripts/environment.sh index 7508dd5b12..88a9ac7123 100644 --- a/scripts/environment.sh +++ b/scripts/environment.sh @@ -62,6 +62,14 @@ if [[ ${GINKGO_FOCUS:-} == "clusterctl-upgrade" ]]; then export NUM_NODES="5" fi +# Scalability test environment vars and config +if [[ ${GINKGO_FOCUS:-} == "scalability" ]]; then + export NUM_NODES=${NUM_NODES:-"40"} + export BMH_BATCH_SIZE=${BMH_BATCH_SIZE:-"5"} + export CONTROL_PLANE_MACHINE_COUNT=${CONTROL_PLANE_MACHINE_COUNT:-"1"} + export WORKER_MACHINE_COUNT=${WORKER_MACHINE_COUNT:-"1"} +fi + # Integration test environment vars and config if [[ ${GINKGO_FOCUS:-} == "integration" || ${GINKGO_FOCUS:-} == "basic" ]]; then export NUM_NODES=${NUM_NODES:-"2"} diff --git a/test/e2e/config/e2e_conf.yaml b/test/e2e/config/e2e_conf.yaml index 5405d1db14..1579c4254c 100644 --- a/test/e2e/config/e2e_conf.yaml +++ b/test/e2e/config/e2e_conf.yaml @@ -150,6 +150,8 @@ providers: targetName: "cluster-template-ubuntu.yaml" - sourcePath: "../_out/cluster-template-centos.yaml" targetName: "cluster-template-centos.yaml" + - sourcePath: "../_out/cluster-template-centos-fake.yaml" + targetName: "cluster-template-centos-fake.yaml" - sourcePath: "../_out/clusterclass.yaml" targetName: "clusterclass-test-clusterclass.yaml" - sourcePath: "../_out/cluster-template-centos-md-remediation.yaml" @@ -210,7 +212,8 @@ variables: BMO_RELEASE_0.5: "data/bmo-deployment/overlays/release-0.5" BMO_RELEASE_0.6: "data/bmo-deployment/overlays/release-0.6" BMO_RELEASE_LATEST: "data/bmo-deployment/overlays/release-latest" - + FKAS_RELEASE_LATEST: "data/fkas" + intervals: default/wait-controllers: ["10m", "10s"] default/wait-cluster: ["20m", "30s"] # The second time to check the availibility of the cluster should happen late, so kcp object has time to be created diff --git a/test/e2e/data/fkas/kustomization.yaml b/test/e2e/data/fkas/kustomization.yaml new file mode 100644 index 0000000000..c41bc10157 --- /dev/null +++ b/test/e2e/data/fkas/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- resources.yaml \ No newline at end of file diff --git a/test/e2e/data/fkas/resources.yaml b/test/e2e/data/fkas/resources.yaml new file mode 100644 index 0000000000..4dc59b51de --- /dev/null +++ b/test/e2e/data/fkas/resources.yaml @@ -0,0 +1,77 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: fkas-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: metal3-fkas-sa + namespace: fkas-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metal3-fkas-role +rules: +- apiGroups: ["metal3.io", "infrastructure.cluster.x-k8s.io"] + resources: ["baremetalhosts", "metal3machines"] + verbs: ["get", "list", "watch"] +- apiGroups: ["cluster.x-k8s.io"] + resources: ["machines"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metal3-fkas-rolebinding +subjects: +- kind: ServiceAccount + name: metal3-fkas-sa + namespace: 
fkas-system +roleRef: + kind: ClusterRole + name: metal3-fkas-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: metal3-fkas-system + namespace: fkas-system +spec: + replicas: 1 + selector: + matchLabels: + app: metal3-fkas-system + template: + metadata: + labels: + app: metal3-fkas-system + spec: + serviceAccountName: metal3-fkas-sa + hostNetwork: true + containers: + - name: metal3-fkas-reconciler + image: 192.168.111.1:5000/localimages/api-server + imagePullPolicy: IfNotPresent + command: ["/reconciler"] + env: + - name: DEBUG + value: "true" + - image: 192.168.111.1:5000/localimages/api-server + imagePullPolicy: IfNotPresent + ports: + - containerPort: 3333 + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: DEBUG + value: "true" + name: metal3-fkas diff --git a/test/e2e/data/infrastructure-metal3/bases/cluster/cluster-with-kcp.yaml b/test/e2e/data/infrastructure-metal3/bases/cluster/cluster-with-kcp.yaml index 50fb65f226..b99b3261ed 100644 --- a/test/e2e/data/infrastructure-metal3/bases/cluster/cluster-with-kcp.yaml +++ b/test/e2e/data/infrastructure-metal3/bases/cluster/cluster-with-kcp.yaml @@ -82,7 +82,7 @@ spec: metaData: ipAddressesFromIPPool: - key: provisioningIP - name: provisioning-pool + name: provisioning-pool-${CLUSTER_NAME} objectNames: - key: name object: machine @@ -92,7 +92,7 @@ spec: object: machine prefixesFromIPPool: - key: provisioningCIDR - name: provisioning-pool + name: provisioning-pool-${CLUSTER_NAME} networkData: links: ethernets: @@ -107,11 +107,11 @@ spec: networks: ipv4: - id: baremetalv4 - ipAddressFromIPPool: baremetalv4-pool + ipAddressFromIPPool: baremetalv4-pool-${CLUSTER_NAME} link: enp2s0 routes: - gateway: - fromIPPool: baremetalv4-pool + fromIPPool: baremetalv4-pool-${CLUSTER_NAME} network: 0.0.0.0 prefix: 0 services: diff --git a/test/e2e/data/infrastructure-metal3/bases/cluster/md.yaml b/test/e2e/data/infrastructure-metal3/bases/cluster/md.yaml index 2c9414603a..da2eb2acd6 100644 --- a/test/e2e/data/infrastructure-metal3/bases/cluster/md.yaml +++ b/test/e2e/data/infrastructure-metal3/bases/cluster/md.yaml @@ -59,7 +59,7 @@ spec: metaData: ipAddressesFromIPPool: - key: provisioningIP - name: provisioning-pool + name: provisioning-pool-${CLUSTER_NAME} objectNames: - key: name object: machine @@ -69,7 +69,7 @@ spec: object: machine prefixesFromIPPool: - key: provisioningCIDR - name: provisioning-pool + name: provisioning-pool-${CLUSTER_NAME} networkData: links: ethernets: @@ -84,11 +84,11 @@ spec: networks: ipv4: - id: baremetalv4 - ipAddressFromIPPool: baremetalv4-pool + ipAddressFromIPPool: baremetalv4-pool-${CLUSTER_NAME} link: enp2s0 routes: - gateway: - fromIPPool: baremetalv4-pool + fromIPPool: baremetalv4-pool-${CLUSTER_NAME} network: 0.0.0.0 prefix: 0 services: diff --git a/test/e2e/data/infrastructure-metal3/bases/clusterclass-cluster/cluster-with-kcp.yaml b/test/e2e/data/infrastructure-metal3/bases/clusterclass-cluster/cluster-with-kcp.yaml index 620ffef6a3..f6c554d8d6 100644 --- a/test/e2e/data/infrastructure-metal3/bases/clusterclass-cluster/cluster-with-kcp.yaml +++ b/test/e2e/data/infrastructure-metal3/bases/clusterclass-cluster/cluster-with-kcp.yaml @@ -43,7 +43,7 @@ spec: metaData: ipAddressesFromIPPool: - key: provisioningIP - name: provisioning-pool + name: provisioning-pool-${CLUSTER_NAME} objectNames: - key: name object: machine @@ -53,7 +53,7 @@ spec: object: machine prefixesFromIPPool: - key: provisioningCIDR - name: provisioning-pool + 
name: provisioning-pool-${CLUSTER_NAME} networkData: links: ethernets: @@ -68,11 +68,11 @@ spec: networks: ipv4: - id: baremetalv4 - ipAddressFromIPPool: baremetalv4-pool + ipAddressFromIPPool: baremetalv4-pool-${CLUSTER_NAME} link: enp2s0 routes: - gateway: - fromIPPool: baremetalv4-pool + fromIPPool: baremetalv4-pool-${CLUSTER_NAME} network: 0.0.0.0 prefix: 0 services: diff --git a/test/e2e/data/infrastructure-metal3/bases/clusterclass-cluster/md.yaml b/test/e2e/data/infrastructure-metal3/bases/clusterclass-cluster/md.yaml index be01049aba..760ab9cb06 100644 --- a/test/e2e/data/infrastructure-metal3/bases/clusterclass-cluster/md.yaml +++ b/test/e2e/data/infrastructure-metal3/bases/clusterclass-cluster/md.yaml @@ -24,7 +24,7 @@ spec: metaData: ipAddressesFromIPPool: - key: provisioningIP - name: provisioning-pool + name: provisioning-pool-${CLUSTER_NAME} objectNames: - key: name object: machine @@ -34,7 +34,7 @@ spec: object: machine prefixesFromIPPool: - key: provisioningCIDR - name: provisioning-pool + name: provisioning-pool-${CLUSTER_NAME} networkData: links: ethernets: @@ -49,11 +49,11 @@ spec: networks: ipv4: - id: baremetalv4 - ipAddressFromIPPool: baremetalv4-pool + ipAddressFromIPPool: baremetalv4-pool-${CLUSTER_NAME} link: enp2s0 routes: - gateway: - fromIPPool: baremetalv4-pool + fromIPPool: baremetalv4-pool-${CLUSTER_NAME} network: 0.0.0.0 prefix: 0 services: diff --git a/test/e2e/data/infrastructure-metal3/bases/ippool/ippool.yaml b/test/e2e/data/infrastructure-metal3/bases/ippool/ippool.yaml index d845251896..365b3b4b9e 100644 --- a/test/e2e/data/infrastructure-metal3/bases/ippool/ippool.yaml +++ b/test/e2e/data/infrastructure-metal3/bases/ippool/ippool.yaml @@ -2,7 +2,7 @@ apiVersion: ipam.metal3.io/v1alpha1 kind: IPPool metadata: - name: provisioning-pool + name: provisioning-pool-${CLUSTER_NAME} namespace: ${NAMESPACE} spec: clusterName: ${CLUSTER_NAME} @@ -15,7 +15,7 @@ spec: apiVersion: ipam.metal3.io/v1alpha1 kind: IPPool metadata: - name: baremetalv4-pool + name: baremetalv4-pool-${CLUSTER_NAME} namespace: ${NAMESPACE} spec: clusterName: ${CLUSTER_NAME} diff --git a/test/e2e/data/infrastructure-metal3/cluster-template-centos-fake/kustomization.yaml b/test/e2e/data/infrastructure-metal3/cluster-template-centos-fake/kustomization.yaml new file mode 100644 index 0000000000..dff8d99b73 --- /dev/null +++ b/test/e2e/data/infrastructure-metal3/cluster-template-centos-fake/kustomization.yaml @@ -0,0 +1,3 @@ +resources: +- ../bases/ippool +- ../bases/centos-kubeadm-config diff --git a/test/e2e/scalability_test.go b/test/e2e/scalability_test.go new file mode 100644 index 0000000000..ef2518d8ef --- /dev/null +++ b/test/e2e/scalability_test.go @@ -0,0 +1,204 @@ +package e2e + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + + bmov1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/utils/ptr" + capi_e2e "sigs.k8s.io/cluster-api/test/e2e" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +/* + * This test apply cluster templates on fake environments to test scalability + * apply scaled number of BMHs per batches and wait for the them to become available + * When all the BMHs become available it apply the cluster templates + */ + +var _ = Describe("When testing scalability with fakeIPA and FKAS [scalability]", Label("scalability"), func() { + BeforeEach(func() { + osType := strings.ToLower(os.Getenv("OS")) + Expect(osType).ToNot(Equal("")) + validateGlobals(specName) + specName = "scale" + namespace = "scale" + numberOfWorkers = int(*e2eConfig.GetInt32PtrVariable("WORKER_MACHINE_COUNT")) + numberOfControlplane = int(*e2eConfig.GetInt32PtrVariable("CONTROL_PLANE_MACHINE_COUNT")) + // We need to override clusterctl apply log folder to avoid getting our credentials exposed. + clusterctlLogFolder = filepath.Join(os.TempDir(), "clusters", bootstrapClusterProxy.GetName()) + createFKASResources() + }) + + // Create the cluster + capi_e2e.ScaleSpec(ctx, func() capi_e2e.ScaleSpecInput { + return capi_e2e.ScaleSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + InfrastructureProvider: ptr.To("metal3"), + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + ClusterCount: ptr.To[int64](5), + Concurrency: ptr.To[int64](2), + Flavor: ptr.To(fmt.Sprintf("%s-fake", osType)), + ControlPlaneMachineCount: ptr.To[int64](int64(numberOfControlplane)), + MachineDeploymentCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](int64(numberOfWorkers)), + PostScaleClusterNamespaceCreated: postScaleClusterNamespaceCreated, + DeployClusterInSeparateNamespaces: false, + } + }) + + AfterEach(func() { + FKASKustomization := e2eConfig.GetVariable("FKAS_RELEASE_LATEST") + By(fmt.Sprintf("Removing FKAS from kustomization %s from the bootsrap cluster", FKASKustomization)) + BuildAndRemoveKustomization(ctx, FKASKustomization, bootstrapClusterProxy) + DumpSpecResourcesAndCleanup(ctx, specName, bootstrapClusterProxy, artifactFolder, namespace, e2eConfig.GetIntervals, clusterName, clusterctlLogFolder, skipCleanup) + }) +}) + +func registerFKASCluster(cn string, ns string) (string, int) { + /* + * this function reads the certificate from the cluster secrets + */ + type FKASCluster struct { + Cluster string `json:"cluster"` + Namespace string `json:"namespace"` + } + type Endpoint struct { + Host string + Port int + } + + fkasCluster := FKASCluster{ + Cluster: cn, + Namespace: ns, + } + marshalled, err := json.Marshal(fkasCluster) + if err != nil { + Logf("impossible to marshall fkasCluster: %s", err) + } + // send the request + cluster_endpoints, err := http.Post("http://172.22.0.2:3333/register", "application/json", bytes.NewReader(marshalled)) + Expect(err).NotTo(HaveOccurred()) + defer cluster_endpoints.Body.Close() + body, err := ioutil.ReadAll(cluster_endpoints.Body) + Expect(err).NotTo(HaveOccurred()) + var response Endpoint + json.Unmarshal(body, &response) + return response.Host, response.Port +} + +func createFKASResources() { + FKASDeployLogFolder := filepath.Join(os.TempDir(), "fkas-deploy-logs", bootstrapClusterProxy.GetName()) + FKASKustomization := e2eConfig.GetVariable("FKAS_RELEASE_LATEST") + By(fmt.Sprintf("Installing FKAS from kustomization %s on the bootsrap cluster", FKASKustomization)) + err := 
+	err := BuildAndApplyKustomization(ctx, &BuildAndApplyKustomizationInput{
+		Kustomization: FKASKustomization,
+		ClusterProxy: bootstrapClusterProxy,
+		WaitForDeployment: true,
+		WatchDeploymentLogs: true,
+		LogPath: FKASDeployLogFolder,
+		DeploymentName: "metal3-fkas-system",
+		DeploymentNamespace: "fkas-system",
+		WaitIntervals: e2eConfig.GetIntervals("default", "wait-deployment"),
+	})
+	Expect(err).NotTo(HaveOccurred())
+}
+
+func LogToFile(logFile string, data []byte) {
+	err := ioutil.WriteFile(filepath.Clean(logFile), data, 0644)
+	Expect(err).ToNot(HaveOccurred(), "Cannot log to file")
+}
+
+// postScaleClusterNamespaceCreated is the ScaleSpec PostScaleClusterNamespaceCreated hook.
+// It creates the BMHs needed in the cluster namespace, registers the cluster with FKAS to
+// prepare a fake API server, and updates the cluster template with the returned endpoint.
+func postScaleClusterNamespaceCreated(clusterProxy framework.ClusterProxy, clusterNamespace string, clusterName string, baseClusterTemplateYAML []byte) (template []byte) {
+	c := clusterProxy.GetClient()
+
+	// getClusterId extracts the numeric suffix of the cluster name.
+	getClusterId := func(clusterName string) (index int) {
+		re := regexp.MustCompile("[0-9]+$")
+		index, _ = strconv.Atoi(string(re.Find([]byte(clusterName))))
+		return
+	}
+
+	// getBmhsCountNeeded returns the number of BMHs needed per cluster.
+	getBmhsCountNeeded := func() (sum int) {
+		numberOfWorkers = int(*e2eConfig.GetInt32PtrVariable("WORKER_MACHINE_COUNT"))
+		numberOfControlplane = int(*e2eConfig.GetInt32PtrVariable("CONTROL_PLANE_MACHINE_COUNT"))
+		sum = numberOfWorkers + numberOfControlplane
+		return
+	}
+
+	// getBmhsFromToIndex returns the range of BMH indexes reserved for the given cluster.
+	getBmhsFromToIndex := func(clusterIndex int, bmhCount int) (from int, to int) {
+		to = clusterIndex*bmhCount - 1
+		from = to - bmhCount + 1
+		return
+	}
+
+	applyBmhsByBatch := func(batchSize int, from int, to int) {
+		applyBatchBmh := func(from int, to int) {
+			bmhsNameList := make([]string, 0, to-from+1)
+			Logf("Apply BMH batch from node_%d to node_%d", from, to)
+			for i := from; i < to+1; i++ {
+				bmhsNameList = append(bmhsNameList, fmt.Sprintf("node-%d", i))
+				resource, err := os.ReadFile(filepath.Join(workDir, fmt.Sprintf("bmhs/node_%d.yaml", i)))
+				Expect(err).ShouldNot(HaveOccurred())
+				Expect(CreateOrUpdateWithNamespace(ctx, clusterProxy, resource, clusterNamespace)).ShouldNot(HaveOccurred())
+			}
+			// List the BMHs once and filter them by name instead of fetching them one by one.
+			// TODO (mboukhalfa) if several clusters share the same namespace, a BMH that this cluster
+			// is waiting on might be picked up by another cluster, so the expected number of available
+			// BMHs would never be reached.
+			Logf("Waiting for BMHs from node_%d to node_%d to become available", from, to)
+			Eventually(func(g Gomega) {
+				bmhList := bmov1alpha1.BareMetalHostList{}
+				g.Expect(c.List(ctx, &bmhList, []client.ListOption{client.InNamespace(clusterNamespace)}...)).To(Succeed())
+				g.Expect(FilterAvailableBmhsName(bmhList.Items, bmhsNameList, bmov1alpha1.StateAvailable)).To(HaveLen(to - from + 1))
+			}, e2eConfig.GetIntervals(specName, "wait-bmh-available")...).Should(Succeed())
+			ListBareMetalHosts(ctx, c, []client.ListOption{client.InNamespace(clusterNamespace)}...)
+		}
+		for i := from; i <= to; i += batchSize {
+			if i+batchSize > to {
+				applyBatchBmh(i, to)
+				break
+			}
+			applyBatchBmh(i, i+batchSize-1)
+		}
+	}
+	index := getClusterId(clusterName)
+	cn := getBmhsCountNeeded()
+	f, t := getBmhsFromToIndex(index, cn)
+	batch, err := strconv.Atoi(e2eConfig.GetVariable("BMH_BATCH_SIZE"))
+	Expect(err).NotTo(HaveOccurred(), "Invalid BMH_BATCH_SIZE")
+	applyBmhsByBatch(batch, f, t)
+
+	h, p := registerFKASCluster(clusterName, clusterNamespace)
+	clusterTemplateYAML := bytes.Replace(baseClusterTemplateYAML, []byte("CLUSTER_APIENDPOINT_HOST_HOLDER"), []byte(h), -1)
+	clusterTemplateYAML = bytes.Replace(clusterTemplateYAML, []byte("CLUSTER_APIENDPOINT_PORT_HOLDER"), []byte(strconv.Itoa(p)), -1)
+
+	return clusterTemplateYAML
+}
+
+// FilterAvailableBmhsName returns the BareMetalHost objects from bmhs whose name is in
+// bmhsNameList and whose provisioning state matches the given state.
+func FilterAvailableBmhsName(bmhs []bmov1alpha1.BareMetalHost, bmhsNameList []string, state bmov1alpha1.ProvisioningState) (result []bmov1alpha1.BareMetalHost) {
+	for _, bmh := range bmhs {
+		for _, name := range bmhsNameList {
+			if bmh.Name == name && bmh.Status.Provisioning.State == state {
+				result = append(result, bmh)
+			}
+		}
+	}
+	return
+}
diff --git a/test/go.mod b/test/go.mod
index e838e39ce6..35d29f18cd 100644
--- a/test/go.mod
+++ b/test/go.mod
@@ -31,6 +31,8 @@ require (
 
 replace github.com/metal3-io/cluster-api-provider-metal3/api => ./../api
 
+replace sigs.k8s.io/cluster-api/test => github.com/Nordix/cluster-api/test v1.0.1-0.20241021093551-8634871d5229
+
 require (
 	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
 	github.com/BurntSushi/toml v1.4.0 // indirect
diff --git a/test/go.sum b/test/go.sum
index 1ad6de5b96..09fee272bc 100644
--- a/test/go.sum
+++ b/test/go.sum
@@ -14,6 +14,8 @@ github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj
 github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
 github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
 github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
+github.com/Nordix/cluster-api/test v1.0.1-0.20241021093551-8634871d5229 h1:vVMA+4WjMjtW3amFTN9eZjFDYURLqHEQ8ro/r+qVBZ8=
+github.com/Nordix/cluster-api/test v1.0.1-0.20241021093551-8634871d5229/go.mod h1:odnzMkDndCRPCWdwl0CRofyZyY857wN34bUih1MLKIc=
 github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 h1:wPbRQzjjwFc0ih8puEVAOFGELsn1zoIIYdxvML7mDxA=
 github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g=
 github.com/adrg/xdg v0.5.0 h1:dDaZvhMXatArP1NPHhnfaQUqWBLBsmx1h1HXQdMoFCY=
@@ -505,8 +507,6 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.0 h1:Tc9rS7JJoZ9sl
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.0/go.mod h1:1ewhL9l1gkPcU/IU/6rFYfikf+7Y5imWv7ARVbBOzNs=
 sigs.k8s.io/cluster-api v1.8.4 h1:jBKQH1H/HUdUFk8T6qDzIxZJfWw1F5ZP0ZpYQJDmTHs=
 sigs.k8s.io/cluster-api v1.8.4/go.mod h1:pXv5LqLxuIbhGIXykyNKiJh+KrLweSBajVHHitPLyoY=
-sigs.k8s.io/cluster-api/test v1.8.4 h1:0SVj0x/pZm5yaEkl2Rj7i3YXIbZ0vCGcUrwI26RGMgc=
-sigs.k8s.io/cluster-api/test v1.8.4/go.mod h1:odnzMkDndCRPCWdwl0CRofyZyY857wN34bUih1MLKIc=
 sigs.k8s.io/controller-runtime v0.18.5 h1:nTHio/W+Q4aBlQMgbnC5hZb4IjIidyrizMai9P6n4Rk=
 sigs.k8s.io/controller-runtime v0.18.5/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=