diff --git a/Makefile b/Makefile index 27e3d0088b..8d57d6148c 100644 --- a/Makefile +++ b/Makefile @@ -82,7 +82,6 @@ GINKGO_NODES ?= 1 GINKGO_TIMEOUT ?= 3h E2E_CONF_FILE ?= $(abspath test/e2e/config/vsphere.yaml) E2E_CONF_OVERRIDE_FILE ?= $(abspath test/e2e/config/config-overrides.yaml) -E2E_CAPV_MODE ?= govmomi E2E_IPAM_KUBECONFIG ?= INTEGRATION_CONF_FILE ?= $(abspath test/integration/integration-dev.yaml) E2E_TEMPLATE_DIR := $(abspath test/e2e/data/) @@ -600,7 +599,6 @@ e2e: $(GINKGO) $(KUSTOMIZE) $(KIND) $(GOVC) ## Run e2e tests --e2e.artifacts-folder="$(ARTIFACTS)" \ --e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) \ --e2e.use-existing-cluster="$(USE_EXISTING_CLUSTER)" \ - --e2e.capv-mode="$(E2E_CAPV_MODE)" \ --e2e.ipam-kubeconfig="$(E2E_IPAM_KUBECONFIG)" ## -------------------------------------- diff --git a/apis/vmware/v1beta1/vspherecluster_types.go b/apis/vmware/v1beta1/vspherecluster_types.go index 6fe2f30724..63f9404b43 100644 --- a/apis/vmware/v1beta1/vspherecluster_types.go +++ b/apis/vmware/v1beta1/vspherecluster_types.go @@ -30,6 +30,7 @@ const ( // VSphereClusterSpec defines the desired state of VSphereCluster. type VSphereClusterSpec struct { + // +optional ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` } diff --git a/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vsphereclusters.yaml b/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vsphereclusters.yaml index d8b57c07f8..881adabf6b 100644 --- a/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vsphereclusters.yaml +++ b/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vsphereclusters.yaml @@ -50,8 +50,6 @@ spec: - host - port type: object - required: - - controlPlaneEndpoint type: object status: description: VSphereClusterStatus defines the observed state of VSphereClusterSpec. 
diff --git a/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vsphereclustertemplates.yaml b/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vsphereclustertemplates.yaml index 0ed3101f0c..c14330e350 100644 --- a/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vsphereclustertemplates.yaml +++ b/config/supervisor/crd/vmware.infrastructure.cluster.x-k8s.io_vsphereclustertemplates.yaml @@ -58,8 +58,6 @@ spec: - host - port type: object - required: - - controlPlaneEndpoint type: object required: - spec diff --git a/hack/e2e.sh b/hack/e2e.sh index 5c945ca079..409594f933 100755 --- a/hack/e2e.sh +++ b/hack/e2e.sh @@ -68,52 +68,76 @@ function login() { AUTH= E2E_IMAGE_SHA= GCR_KEY_FILE="${GCR_KEY_FILE:-}" -export VSPHERE_SERVER="${GOVC_URL}" -export VSPHERE_USERNAME="${GOVC_USERNAME}" -export VSPHERE_PASSWORD="${GOVC_PASSWORD}" -export VSPHERE_SSH_AUTHORIZED_KEY="${VM_SSH_PUB_KEY}" +export VSPHERE_SERVER="${GOVC_URL:-}" +export VSPHERE_USERNAME="${GOVC_USERNAME:-}" +export VSPHERE_PASSWORD="${GOVC_PASSWORD:-}" +export VSPHERE_SSH_AUTHORIZED_KEY="${VM_SSH_PUB_KEY:-}" export VSPHERE_SSH_PRIVATE_KEY="/root/ssh/.private-key/private-key" export E2E_CONF_FILE="${REPO_ROOT}/test/e2e/config/vsphere.yaml" export E2E_CONF_OVERRIDE_FILE="" -export E2E_CAPV_MODE="${CAPV_MODE:-govmomi}" +export E2E_VM_OPERATOR_VERSION="${VM_OPERATOR_VERSION:-v1.8.1}" export ARTIFACTS="${ARTIFACTS:-${REPO_ROOT}/_artifacts}" export DOCKER_IMAGE_TAR="/tmp/images/image.tar" export GC_KIND="false" # Make tests run in-parallel export GINKGO_NODES=5 + # Set the kubeconfig to the IPAM cluster so the e2e tests can claim ip addresses # for kube-vip. 
export E2E_IPAM_KUBECONFIG="/root/ipam-conf/capv-services.conf" -# Run the vpn client in container -docker run --rm -d --name vpn -v "${HOME}/.openvpn/:${HOME}/.openvpn/" \ - -w "${HOME}/.openvpn/" --cap-add=NET_ADMIN --net=host --device=/dev/net/tun \ - gcr.io/k8s-staging-capi-vsphere/extra/openvpn:latest - -# Tail the vpn logs -docker logs vpn - -# Wait until the VPN connection is active and we are able to reach the ipam cluster -function wait_for_ipam_reachable() { - local n=0 - until [ $n -ge 30 ]; do - kubectl --kubeconfig="${E2E_IPAM_KUBECONFIG}" --request-timeout=2s get inclusterippools.ipam.cluster.x-k8s.io && RET=$? || RET=$? - if [[ "$RET" -eq 0 ]]; then - break - fi - n=$((n + 1)) - sleep 1 - done - return "$RET" -} -wait_for_ipam_reachable +# Only run the vpn/check for IPAM when we need them re='\[vcsim\]' +if [[ ! "${GINKGO_FOCUS:-}" =~ $re ]]; then + # Run the vpn client in container + docker run --rm -d --name vpn -v "${HOME}/.openvpn/:${HOME}/.openvpn/" \ + -w "${HOME}/.openvpn/" --cap-add=NET_ADMIN --net=host --device=/dev/net/tun \ + gcr.io/k8s-staging-capi-vsphere/extra/openvpn:latest + + # Tail the vpn logs + docker logs vpn + + # Wait until the VPN connection is active and we are able to reach the ipam cluster + function wait_for_ipam_reachable() { + local n=0 + until [ $n -ge 30 ]; do + kubectl --kubeconfig="${E2E_IPAM_KUBECONFIG}" --request-timeout=2s get inclusterippools.ipam.cluster.x-k8s.io && RET=$? || RET=$? + if [[ "$RET" -eq 0 ]]; then + break + fi + n=$((n + 1)) + sleep 1 + done + return "$RET" + } + wait_for_ipam_reachable +fi make envsubst +# kind::prepullImage pre-pulls a docker image if not already present locally. +# The result will be available in the retVal value which is accessible from the caller.
+kind::prepullImage () { + local image=$1 + image="${image//+/_}" + + if [[ "$(docker images -q "$image" 2> /dev/null)" == "" ]]; then + echo "+ Pulling $image" + docker pull "$image" + else + echo "+ image $image already present in the system, skipping pre-pull" + fi +} + +if [[ ${GINKGO_FOCUS:-} =~ \\\[supervisor\\\] ]]; then + kind::prepullImage "gcr.io/k8s-staging-capi-vsphere/extra/vm-operator:${E2E_VM_OPERATOR_VERSION}" +fi + ARCH="$(go env GOARCH)" # Only build and upload the image if we run tests which require it to save some $. +# NOTE: the image is required for clusterctl upgrade tests, and those tests are run only as part of the main e2e test job (without any focus) if [[ -z "${GINKGO_FOCUS+x}" ]]; then # Save the docker image locally make e2e-images diff --git a/test/e2e/anti_affinity_test.go b/test/e2e/anti_affinity_test.go index 1ac86484de..9c2a5662f7 100644 --- a/test/e2e/anti_affinity_test.go +++ b/test/e2e/anti_affinity_test.go @@ -48,7 +48,7 @@ type AntiAffinitySpecInput struct { var _ = Describe("Cluster creation with anti affined nodes", func() { const specName = "anti-affinity" - Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { var namespace *corev1.Namespace BeforeEach(func() { @@ -72,7 +72,7 @@ var _ = Describe("Cluster creation with anti affined nodes", func() { }, Global: GlobalInput{ BootstrapClusterProxy: bootstrapClusterProxy, - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, E2EConfig: e2eConfig, ArtifactFolder: artifactFolder, }, diff --git a/test/e2e/capi_machine_deployment_rollout_test.go b/test/e2e/capi_machine_deployment_rollout_test.go index c9ec3778ed..194a97b269 100644 --- a/test/e2e/capi_machine_deployment_rollout_test.go +++ b/test/e2e/capi_machine_deployment_rollout_test.go @@ -19,19 +19,22 @@ package e2e import ( .
"github.com/onsi/ginkgo/v2" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" ) var _ = Describe("ClusterAPI Machine Deployment Tests", func() { const specName = "md-rollout" // copied from CAPI Context("Running the MachineDeployment rollout spec", func() { - Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { capi_e2e.MachineDeploymentRolloutSpec(ctx, func() capi_e2e.MachineDeploymentRolloutSpecInput { return capi_e2e.MachineDeploymentRolloutSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, + Flavor: testSpecificSettingsGetter().FlavorForMode(clusterctl.DefaultFlavor), + PostNamespaceCreated: testSpecificSettingsGetter().PostNamespaceCreatedFunc, } }) }) diff --git a/test/e2e/cluster_upgrade_test.go b/test/e2e/cluster_upgrade_test.go index c6d12a482a..223aac3769 100644 --- a/test/e2e/cluster_upgrade_test.go +++ b/test/e2e/cluster_upgrade_test.go @@ -33,7 +33,7 @@ var _ = Describe("When upgrading a workload cluster using ClusterClass and testi // the resolved versions as env vars. This only works without side effects on other tests because we are // running this test in its separate job. const specName = "k8s-upgrade-and-conformance" // copied from CAPI - Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { capi_e2e.ClusterUpgradeConformanceSpec(ctx, func() capi_e2e.ClusterUpgradeConformanceSpecInput { // The Kubernetes versions have to be resolved as they can be defined like this: stable-1.29, ci/latest-1.30. 
kubernetesVersionUpgradeFrom, err := kubernetesversions.ResolveVersion(ctx, e2eConfig.GetVariable("KUBERNETES_VERSION_UPGRADE_FROM")) @@ -44,14 +44,15 @@ var _ = Describe("When upgrading a workload cluster using ClusterClass and testi Expect(os.Setenv("KUBERNETES_VERSION_UPGRADE_TO", kubernetesVersionUpgradeTo)).To(Succeed()) return capi_e2e.ClusterUpgradeConformanceSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, WorkerMachineCount: ptr.To[int64](5), // Note: install-on-bootstrap will install Kubernetes on bootstrap if the correct Kubernetes version // cannot be detected. This is required to install versions we don't have images for (e.g. ci/latest-1.30). - Flavor: ptr.To("install-on-bootstrap"), + Flavor: ptr.To(testSpecificSettingsGetter().FlavorForMode("install-on-bootstrap")), + PostNamespaceCreated: testSpecificSettingsGetter().PostNamespaceCreatedFunc, } }) }) @@ -61,15 +62,16 @@ var _ = Describe("When upgrading a workload cluster using ClusterClass [ClusterC // Note: This installs a cluster based on KUBERNETES_VERSION_UPGRADE_FROM and then upgrades to // KUBERNETES_VERSION_UPGRADE_TO. 
const specName = "k8s-upgrade" // aligned to CAPI - Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { capi_e2e.ClusterUpgradeConformanceSpec(ctx, func() capi_e2e.ClusterUpgradeConformanceSpecInput { return capi_e2e.ClusterUpgradeConformanceSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - Flavor: ptr.To("topology"), + Flavor: ptr.To(testSpecificSettingsGetter().FlavorForMode("topology")), + PostNamespaceCreated: testSpecificSettingsGetter().PostNamespaceCreatedFunc, // This test is run in CI in parallel with other tests. To keep the test duration reasonable // the conformance tests are skipped. ControlPlaneMachineCount: ptr.To[int64](1), diff --git a/test/e2e/clusterclass_changes_test.go b/test/e2e/clusterclass_changes_test.go index 075b78ad81..b4c9a31e9f 100644 --- a/test/e2e/clusterclass_changes_test.go +++ b/test/e2e/clusterclass_changes_test.go @@ -23,15 +23,16 @@ import ( var _ = Describe("When testing ClusterClass changes [ClusterClass]", func() { const specName = "clusterclass-changes" // copied from CAPI - Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { capie2e.ClusterClassChangesSpec(ctx, func() capie2e.ClusterClassChangesSpecInput { return capie2e.ClusterClassChangesSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - Flavor: "topology", + Flavor: testSpecificSettingsGetter().FlavorForMode("topology"), + 
PostNamespaceCreated: testSpecificSettingsGetter().PostNamespaceCreatedFunc, ModifyControlPlaneFields: map[string]interface{}{ "spec.machineTemplate.nodeDrainTimeout": "10s", }, diff --git a/test/e2e/clusterctl_upgrade_test.go b/test/e2e/clusterctl_upgrade_test.go index aaf6f92064..efc219cc1f 100644 --- a/test/e2e/clusterctl_upgrade_test.go +++ b/test/e2e/clusterctl_upgrade_test.go @@ -23,15 +23,16 @@ import ( var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.9=>current, CAPI 1.6=>1.6) [ClusterClass]", func() { const specName = "clusterctl-upgrade-1.9-current" // prefix (clusterctl-upgrade) copied from CAPI - Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput { return capi_e2e.ClusterctlUpgradeSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - MgmtFlavor: "remote-management", + MgmtFlavor: testSpecificSettingsGetter().FlavorForMode("remote-management"), + PostNamespaceCreated: testSpecificSettingsGetter().PostNamespaceCreatedFunc, InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.1/clusterctl-{OS}-{ARCH}", InitWithCoreProvider: "cluster-api:v1.6.1", InitWithBootstrapProviders: []string{"kubeadm:v1.6.1"}, @@ -44,7 +45,7 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.9= // Ensure all Kubernetes versions used here are covered in patch-vsphere-template.yaml InitWithKubernetesVersion: "v1.29.0", WorkloadKubernetesVersion: "v1.29.0", - WorkloadFlavor: "workload", + WorkloadFlavor: testSpecificSettingsGetter().FlavorForMode("workload"), } }) }, 
WithIP("WORKLOAD_CONTROL_PLANE_ENDPOINT_IP")) @@ -52,15 +53,16 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.9= var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.8=>current, CAPI 1.5=>1.6) [ClusterClass]", func() { const specName = "clusterctl-upgrade-1.8-current" // prefix (clusterctl-upgrade) copied from CAPI - Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput { return capi_e2e.ClusterctlUpgradeSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - MgmtFlavor: "remote-management", + MgmtFlavor: testSpecificSettingsGetter().FlavorForMode("remote-management"), + PostNamespaceCreated: testSpecificSettingsGetter().PostNamespaceCreatedFunc, InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.4/clusterctl-{OS}-{ARCH}", InitWithCoreProvider: "cluster-api:v1.5.4", InitWithBootstrapProviders: []string{"kubeadm:v1.5.4"}, @@ -73,7 +75,7 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.8= // Ensure all Kubernetes versions used here are covered in patch-vsphere-template.yaml InitWithKubernetesVersion: "v1.28.0", WorkloadKubernetesVersion: "v1.28.0", - WorkloadFlavor: "workload", + WorkloadFlavor: testSpecificSettingsGetter().FlavorForMode("workload"), } }) }, WithIP("WORKLOAD_CONTROL_PLANE_ENDPOINT_IP")) diff --git a/test/e2e/config/vsphere.yaml b/test/e2e/config/vsphere.yaml index 103f44588b..8e4ac67515 100644 --- a/test/e2e/config/vsphere.yaml +++ b/test/e2e/config/vsphere.yaml @@ -20,6 +20,8 @@ images: loadBehavior: mustLoad - name: 
gcr.io/k8s-staging-capi-vsphere/cluster-api-vcsim-controller-{ARCH}:dev loadBehavior: mustLoad + - name: gcr.io/k8s-staging-capi-vsphere/extra/vm-operator:v1.8.1 + loadBehavior: tryLoad - name: quay.io/jetstack/cert-manager-cainjector:v1.12.2 loadBehavior: tryLoad - name: quay.io/jetstack/cert-manager-webhook:v1.12.2 @@ -166,7 +168,7 @@ providers: - sourcePath: "../data/shared/v1.8/v1beta1_provider/metadata.yaml" - name: vcsim - type: InfrastructureProvider + type: RuntimeExtensionProvider # vcsim isn't a provider, but we fake it is so it can be handled by the clusterctl machinery. versions: - name: v1.10.99 # Use manifest from source files @@ -176,6 +178,20 @@ providers: # Add cluster templates - sourcePath: "../data/shared/main/v1beta1_provider/metadata.yaml" + - name: vm-operator + type: RuntimeExtensionProvider # vm-operator isn't a provider, but we fake it is so it can be handled by the clusterctl machinery. + versions: + - name: v1.8.1 + # Use manifest from source files + value: "https://storage.googleapis.com/artifacts.k8s-staging-capi-vsphere.appspot.com/vm-operator/v1.8.1.yaml" + type: "url" + contract: v1beta1 + files: + - sourcePath: "../data/shared/main/v1beta1_operator/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + variables: # Ensure all Kubernetes versions used here are covered in patch-vsphere-template.yaml KUBERNETES_VERSION: "v1.29.0" @@ -210,6 +226,8 @@ variables: EXP_NODE_ANTI_AFFINITY: "true" CAPI_DIAGNOSTICS_ADDRESS: ":8080" CAPI_INSECURE_DIAGNOSTICS: "true" + SERVICE_ACCOUNTS_CM_NAME: "" + SERVICE_ACCOUNTS_CM_NAMESPACE: "" intervals: default/wait-controllers: ["5m", "10s"] diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass/patch-vsphere-template.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass/patch-vsphere-template.yaml index 9bef1c7ff7..8633dc282c 100644 --- 
a/test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass/patch-vsphere-template.yaml +++ b/test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass/patch-vsphere-template.yaml @@ -4,7 +4,7 @@ definitions: - jsonPatches: - op: replace - path: /spec/template/spec/template + path: /spec/template/spec/imageName valueFrom: # We have to fall back to v1.29.0 for the conformance latest ci test which uses # versions without corresponding templates like "v1.30.0-alpha.0.525+09a5049ca78502". @@ -17,13 +17,13 @@ ubuntu-2204-kube-v1.29.0 {{- end -}}{{- end -}} selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: vmware.infrastructure.cluster.x-k8s.io/v1beta1 kind: VSphereMachineTemplate matchResources: controlPlane: true - jsonPatches: - op: replace - path: /spec/template/spec/template + path: /spec/template/spec/imageName valueFrom: # We have to fall back to v1.29.0 for the conformance latest ci test which uses # versions without corresponding templates like "v1.30.0-alpha.0.525+09a5049ca78502". @@ -36,7 +36,7 @@ ubuntu-2204-kube-v1.29.0 {{- end -}}{{- end -}} selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: vmware.infrastructure.cluster.x-k8s.io/v1beta1 kind: VSphereMachineTemplate matchResources: machineDeploymentClass: diff --git a/test/e2e/data/shared/main/v1beta1_operator/metadata.yaml b/test/e2e/data/shared/main/v1beta1_operator/metadata.yaml new file mode 100644 index 0000000000..d67201dfca --- /dev/null +++ b/test/e2e/data/shared/main/v1beta1_operator/metadata.yaml @@ -0,0 +1,11 @@ +# maps release series of major.minor to cluster-api contract version +# the contract version may change between minor or major versions, but *not* +# between patch versions. 
+# +# update this file only when a new major or minor version is released +apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 +kind: Metadata +releaseSeries: + - major: 1 + minor: 8 + contract: v1beta1 diff --git a/test/e2e/dhcp_overrides_test.go b/test/e2e/dhcp_overrides_test.go index 237321b0d1..1bb7e09a14 100644 --- a/test/e2e/dhcp_overrides_test.go +++ b/test/e2e/dhcp_overrides_test.go @@ -53,7 +53,7 @@ type DHCPOverrides struct { var _ = Describe("DHCPOverrides configuration test", func() { When("Creating a cluster with DHCPOverrides configured", func() { const specName = "dhcp-overrides" - Setup("dhcp-overrides", func(testSpecificClusterctlConfigPathGetter func() string) { + Setup("dhcp-overrides", func(testSpecificSettingsGetter func() testSettings) { var namespace *corev1.Namespace BeforeEach(func() { @@ -73,10 +73,10 @@ var _ = Describe("DHCPOverrides configuration test", func() { ClusterProxy: bootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, - Flavor: "dhcp-overrides", + Flavor: testSpecificSettingsGetter().FlavorForMode("dhcp-overrides"), Namespace: namespace.Name, ClusterName: clusterName, KubernetesVersion: e2eConfig.GetVariable(KubernetesVersion), diff --git a/test/e2e/e2e_setup_test.go b/test/e2e/e2e_setup_test.go index faa8576867..91262cbaf5 100644 --- a/test/e2e/e2e_setup_test.go +++ b/test/e2e/e2e_setup_test.go @@ -21,11 +21,16 @@ import ( "fmt" "os" "strings" + "time" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "k8s.io/utils/ptr" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" + "sigs.k8s.io/cluster-api/test/framework" . "sigs.k8s.io/cluster-api/test/framework/ginkgoextensions" + crclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" vsphereip "sigs.k8s.io/cluster-api-provider-vsphere/test/framework/ip" @@ -56,8 +61,14 @@ func WithGateway(variableName string) SetupOption { } } +type testSettings struct { + ClusterctlConfigPath string + PostNamespaceCreatedFunc func(managementClusterProxy framework.ClusterProxy, workloadClusterNamespace string) + FlavorForMode func(flavor string) string +} + // Setup for the specific test. -func Setup(specName string, f func(testSpecificClusterctlConfigPathGetter func() string), opts ...SetupOption) { +func Setup(specName string, f func(testSpecificSettings func() testSettings), opts ...SetupOption) { options := &setupOptions{} for _, o := range opts { o(options) @@ -67,6 +78,7 @@ func Setup(specName string, f func(testSpecificClusterctlConfigPathGetter func() testSpecificClusterctlConfigPath string testSpecificIPAddressClaims vsphereip.AddressClaims testSpecificVariables map[string]string + postNamespaceCreatedFunc func(managementClusterProxy framework.ClusterProxy, workloadClusterNamespace string) ) BeforeEach(func() { Byf("Setting up test env for %s", specName) @@ -76,18 +88,54 @@ func Setup(specName string, f func(testSpecificClusterctlConfigPathGetter func() // get IPs from the in cluster address manager testSpecificIPAddressClaims, testSpecificVariables = inClusterAddressManager.ClaimIPs(ctx, vsphereip.WithGateway(options.gatewayIPVariableName), vsphereip.WithIP(options.additionalIPVariableNames...)) case VCSimTestTarget: - Byf("Getting IP for %s", strings.Join(append([]string{vsphereip.ControlPlaneEndpointIPVariable}, options.additionalIPVariableNames...), ",")) + c := bootstrapClusterProxy.GetClient() // get IPs from the vcsim 
controller + // NOTE: ControlPlaneEndpointIP is the first claim in the returned list (this assumption is used below). + Byf("Getting IP for %s", strings.Join(append([]string{vsphereip.ControlPlaneEndpointIPVariable}, options.additionalIPVariableNames...), ",")) testSpecificIPAddressClaims, testSpecificVariables = vcsimAddressManager.ClaimIPs(ctx, vsphereip.WithIP(options.additionalIPVariableNames...)) - Byf("Creating a vcsim server for %s", specName) + // variables derived from the vCenterSimulator + vCenterSimulator, err := vspherevcsim.Get(ctx, c) + Expect(err).ToNot(HaveOccurred(), "Failed to get VCenterSimulator") + + Byf("Creating EnvVar %s", klog.KRef(metav1.NamespaceDefault, specName)) + envVar := &vcsimv1.EnvVar{ + ObjectMeta: metav1.ObjectMeta{ + Name: specName, + Namespace: metav1.NamespaceDefault, + }, + Spec: vcsimv1.EnvVarSpec{ + VCenterSimulator: &vcsimv1.NamespacedRef{ + Namespace: vCenterSimulator.Namespace, + Name: vCenterSimulator.Name, + }, + ControlPlaneEndpoint: vcsimv1.NamespacedRef{ + Namespace: testSpecificIPAddressClaims[0].Namespace, + Name: testSpecificIPAddressClaims[0].Name, + }, + // NOTE: we are omitting VMOperatorDependencies because it is not created yet (it will be created by the PostNamespaceCreated hook) + // But this is not an issue because a default dependenciesConfig that works for vcsim will be automatically used.
+ }, + } + + err = c.Create(ctx, envVar) + Expect(err).ToNot(HaveOccurred(), "Failed to create EnvVar") - // variables for govmomi mode derived from the vCenterSimulator - vCenterSimulator, err := vspherevcsim.Get(ctx, bootstrapClusterProxy.GetClient()) - Expect(err).ToNot(HaveOccurred(), "Failed to create VCenterSimulator") + Eventually(func() bool { + if err := c.Get(ctx, crclient.ObjectKeyFromObject(envVar), envVar); err != nil { + return false + } + return len(envVar.Status.Variables) > 0 + }, 30*time.Second, 5*time.Second).Should(BeTrue(), "Failed to get EnvVar %s", klog.KObj(envVar)) + + Byf("Setting test variables for %s", specName) + for k, v := range envVar.Status.Variables { + // ignore variables that will be set later on by the test + if sets.New("NAMESPACE", "CLUSTER_NAME", "KUBERNETES_VERSION", "CONTROL_PLANE_MACHINE_COUNT", "WORKER_MACHINE_COUNT", "VSPHERE_SSH_AUTHORIZED_KEY").Has(k) { + continue + } - for k, v := range vCenterSimulator.GovmomiVariables() { // unset corresponding env variable (that in CI contains VMC data), so we are sure we use the value for vcsim if strings.HasPrefix(k, "VSPHERE_") { Expect(os.Unsetenv(k)).To(Succeed()) @@ -95,17 +143,14 @@ func Setup(specName string, f func(testSpecificClusterctlConfigPathGetter func() testSpecificVariables[k] = v } + } - // variables for govmomi mode derived from envVar.Spec.Cluster - // NOTE: picking Datacenter, Cluster, Datastore that exists by default in vcsim - clusterEnvVarSpec := vcsimv1.ClusterEnvVarSpec{ - Datacenter: ptr.To[int32](0), // DC0 - Cluster: ptr.To[int32](0), // C0 - Datastore: ptr.To[int32](0), // LocalDS_0 - } + if testMode == SupervisorTestMode { + postNamespaceCreatedFunc = setupNamespaceWithVMOperatorDependencies - for k, v := range clusterEnvVarSpec.GovmomiVariables() { - testSpecificVariables[k] = v + // Update the CLUSTER_CLASS_NAME variable adding the supervisor suffix. 
+ if e2eConfig.HasVariable("CLUSTER_CLASS_NAME") { + testSpecificVariables["CLUSTER_CLASS_NAME"] = fmt.Sprintf("%s-supervisor", e2eConfig.GetVariable("CLUSTER_CLASS_NAME")) } } @@ -134,7 +179,52 @@ func Setup(specName string, f func(testSpecificClusterctlConfigPathGetter func() // so when the test is executed the func could get the value set into the BeforeEach block above. // If instead we pass the value directly, the test func will get the value at the moment of the initial parsing of // the Ginkgo node tree, which is an empty string (the BeforeEach block above are not run during initial parsing). - f(func() string { return testSpecificClusterctlConfigPath }) + f(func() testSettings { + return testSettings{ + ClusterctlConfigPath: testSpecificClusterctlConfigPath, + PostNamespaceCreatedFunc: postNamespaceCreatedFunc, + FlavorForMode: func(flavor string) string { + if testMode == SupervisorTestMode { + // This assumes all the supervisor flavors have the name of the corresponding govmomi flavor + "-supervisor" suffix + if flavor == "" { + return "supervisor" + } + return fmt.Sprintf("%s-supervisor", flavor) + } + return flavor + }, + } + }) +} + +func setupNamespaceWithVMOperatorDependencies(managementClusterProxy framework.ClusterProxy, workloadClusterNamespace string) { + c := managementClusterProxy.GetClient() + + vCenterSimulator, err := vspherevcsim.Get(ctx, bootstrapClusterProxy.GetClient()) + Expect(err).ToNot(HaveOccurred(), "Failed to get VCenterSimulator") + + Byf("Creating VMOperatorDependencies %s", klog.KRef(workloadClusterNamespace, "vcsim")) + dependenciesConfig := &vcsimv1.VMOperatorDependencies{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vcsim", + Namespace: workloadClusterNamespace, + }, + Spec: vcsimv1.VMOperatorDependenciesSpec{ + VCenterSimulatorRef: &vcsimv1.NamespacedRef{ + Namespace: vCenterSimulator.Namespace, + Name: vCenterSimulator.Name, + }, + }, + } + err = c.Create(ctx, dependenciesConfig) + Expect(err).ToNot(HaveOccurred(), "Failed to 
create VMOperatorDependencies") + + Eventually(func() bool { + if err := c.Get(ctx, crclient.ObjectKeyFromObject(dependenciesConfig), dependenciesConfig); err != nil { + return false + } + return dependenciesConfig.Status.Ready + }, 30*time.Second, 5*time.Second).Should(BeTrue(), "Failed to get VMOperatorDependencies on namespace %s", workloadClusterNamespace) } // Note: Copy-paste from CAPI below. diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 2d557d7c6f..b6b00ffbc8 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -26,7 +26,6 @@ import ( "testing" . "github.com/onsi/ginkgo/v2" - "github.com/onsi/ginkgo/v2/types" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -37,7 +36,6 @@ import ( . "sigs.k8s.io/cluster-api/test/framework/ginkgoextensions" capiutil "sigs.k8s.io/cluster-api/util" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/yaml" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" vsphereframework "sigs.k8s.io/cluster-api-provider-vsphere/test/framework" @@ -128,11 +126,6 @@ var ( vcsimAddressManager vsphereip.AddressManager ) -type configOverrides struct { - Variables map[string]string `json:"variables,omitempty"` - Intervals map[string][]string `json:"intervals,omitempty"` -} - func init() { flag.StringVar(&configPath, "e2e.config", "", "path to the e2e config file") flag.StringVar(&configOverridesPath, "e2e.config-overrides", "", "path to the e2e config file containing overrides to the e2e config file") @@ -141,7 +134,6 @@ func init() { flag.BoolVar(&skipCleanup, "e2e.skip-resource-cleanup", false, "if true, the resource cleanup after tests will be skipped") flag.BoolVar(&useExistingCluster, "e2e.use-existing-cluster", false, "if true, the test uses the current cluster instead of creating a new one (default discovery rules apply)") flag.StringVar(&e2eIPAMKubeconfig, "e2e.ipam-kubeconfig", "", "path to the kubeconfig for the IPAM cluster") - 
flag.StringVar(&testMode, "e2e.capv-mode", GovmomiTestMode, "defines how CAPV should behave during this test, one of govmomi|supervisor") } func TestE2E(t *testing.T) { @@ -168,31 +160,16 @@ func TestE2E(t *testing.T) { // fetch the current config suiteConfig, reporterConfig := GinkgoConfiguration() - // vcsim testd currently have a couple of limitations: - // - they can't be run together with other tests, because the vcsim controller will interfere with objects - // created by other tests. - // - in order to trick clusterctl to install the vcsim controller, it is defined as another infra provider, - // and thus the tests needs to be explicit about using vsphere as target infra provider, but not all the - // tests allows this option. - // - // In order to deal with this nicely, we detect if we are running a vcsim test or not, and edit the test suite and - // how do we setup the test accordingly. + // Detect test target. testTarget = VCenterTestTarget if strings.Contains(strings.Join(suiteConfig.FocusStrings, " "), "\\[vcsim\\]") { testTarget = VCSimTestTarget } - // Automatically skip vcsim tests if not explicitly required. - // NOTE: This prevents to edit all the job configurations for non vcsim tests adding skip [vcsim] - if testTarget != VCSimTestTarget { - suiteConfig.SkipStrings = append(suiteConfig.SkipStrings, "\\[vcsim\\]") - } - - report := PreviewSpecs("capv-e2e", suiteConfig, reporterConfig) - for _, s := range report.SpecReports { - if s.State == types.SpecStatePassed { - fmt.Println(s.LeafNodeText, s.ContainerHierarchyTexts) - } + // Detect test mode. + testMode = GovmomiTestMode + if strings.Contains(strings.Join(suiteConfig.FocusStrings, " "), "\\[supervisor\\]") { + testMode = SupervisorTestMode } RunSpecs(t, "capv-e2e", suiteConfig, reporterConfig) @@ -202,6 +179,8 @@ func TestE2E(t *testing.T) { // The local clusterctl repository & the bootstrap cluster are created once and shared across all the tests. 
var _ = SynchronizedBeforeSuite(func() []byte { // Before all ParallelNodes. + Byf("TestTarget: %s\n", testTarget) + Byf("TestMode: %s\n", testMode) Expect(configPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.") Expect(os.MkdirAll(artifactFolder, 0755)).To(Succeed(), "Invalid test suite argument. Can't create e2e.artifacts-folder %q", artifactFolder) //nolint:gosec // Non-production code @@ -211,12 +190,9 @@ var _ = SynchronizedBeforeSuite(func() []byte { Byf("Loading the e2e test configuration from %q", configPath) var err error - e2eConfig, err = vsphereframework.LoadE2EConfig(ctx, configPath) + e2eConfig, err = vsphereframework.LoadE2EConfig(ctx, configPath, configOverridesPath, testTarget, testMode) Expect(err).NotTo(HaveOccurred()) - // Add config overrides + drop vcsim relates provider/image if not necessary. - amendE2EConfig() - Byf("Creating a clusterctl local repository into %q", artifactFolder) clusterctlConfigPath, err = vsphereframework.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactFolder, "repository"), true) Expect(err).NotTo(HaveOccurred()) @@ -245,24 +221,28 @@ var _ = SynchronizedBeforeSuite(func() []byte { strings.Join([]string{ artifactFolder, configPath, + configOverridesPath, + testTarget, + testMode, clusterctlConfigPath, bootstrapClusterProxy.GetKubeconfigPath(), strings.Join(ipClaimLabelsRaw, ";"), - testTarget, }, ","), ) }, func(data []byte) { // Before each ParallelNode. 
parts := strings.Split(string(data), ",") - Expect(parts).To(HaveLen(6)) + Expect(parts).To(HaveLen(8)) artifactFolder = parts[0] configPath = parts[1] - clusterctlConfigPath = parts[2] - kubeconfigPath := parts[3] - ipClaimLabelsRaw := parts[4] - testTarget = parts[5] + configOverridesPath = parts[2] + testTarget = parts[3] + testMode = parts[4] + clusterctlConfigPath = parts[5] + kubeconfigPath := parts[6] + ipClaimLabelsRaw := parts[7] namespaces = map[*corev1.Namespace]context.CancelFunc{} @@ -275,12 +255,9 @@ var _ = SynchronizedBeforeSuite(func() []byte { } var err error - e2eConfig, err = vsphereframework.LoadE2EConfig(ctx, configPath) + e2eConfig, err = vsphereframework.LoadE2EConfig(ctx, configPath, configOverridesPath, testTarget, testMode) Expect(err).NotTo(HaveOccurred()) - // Add config overrides + drop vcsim relates provider/image if not necessary. - amendE2EConfig() - bootstrapClusterProxy = framework.NewClusterProxy("bootstrap", kubeconfigPath, initScheme(), framework.WithMachineLogCollector(vspherelog.MachineLogCollector{})) ipClaimLabels := map[string]string{} @@ -347,47 +324,6 @@ func initScheme() *runtime.Scheme { return sc } -func amendE2EConfig() { - // If defined, load configOverrides. - // This can be used e.g. when working with a custom vCenter server for local testing (instead of the one in VMC used in CI). - if configOverridesPath != "" { - Expect(configOverridesPath).To(BeAnExistingFile(), "Invalid test suite argument. 
e2e.config-overrides should be an existing file.") - - Byf("Merging with e2e config overrides from %q", configOverridesPath) - configData, err := os.ReadFile(configOverridesPath) //nolint:gosec - Expect(err).ToNot(HaveOccurred(), "Failed to read e2e config overrides") - Expect(configData).ToNot(BeEmpty(), "The e2e config overrides should not be empty") - - configOverrides := &configOverrides{} - Expect(yaml.Unmarshal(configData, configOverrides)).To(Succeed(), "Failed to convert e2e config overrides to yaml") - - for k, v := range configOverrides.Variables { - e2eConfig.Variables[k] = v - } - for k, v := range configOverrides.Intervals { - e2eConfig.Intervals[k] = v - } - } - - if testTarget == VCenterTestTarget { - // In case we are not testing vcsim, then drop the vcsim controller from providers and images. - // This ensures that all the tests not yet allowing to explicitly set vsphere as target infra provider keep working. - for i := range e2eConfig.Providers { - if e2eConfig.Providers[i].Name == "vcsim" { - e2eConfig.Providers = append(e2eConfig.Providers[:i], e2eConfig.Providers[i+1:]...) - break - } - } - - for i := range e2eConfig.Images { - if strings.Contains(e2eConfig.Images[i].Name, "cluster-api-vcsim-controller") { - e2eConfig.Images = append(e2eConfig.Images[:i], e2eConfig.Images[i+1:]...) 
- break - } - } - } -} - func setupSpecNamespace(specName string) *corev1.Namespace { Byf("Creating a namespace for hosting the %q test spec", specName) namespace, cancelWatches := framework.CreateNamespaceAndWatchEvents(ctx, framework.CreateNamespaceAndWatchEventsInput{ diff --git a/test/e2e/gpu_pci_passthrough_test.go b/test/e2e/gpu_pci_passthrough_test.go index d6016f2018..e0bce26028 100644 --- a/test/e2e/gpu_pci_passthrough_test.go +++ b/test/e2e/gpu_pci_passthrough_test.go @@ -33,7 +33,7 @@ import ( var _ = Describe("Cluster creation with GPU devices as PCI passthrough [specialized-infra]", func() { const specName = "gpu-pci" - Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { var ( namespace *corev1.Namespace ) @@ -49,10 +49,10 @@ var _ = Describe("Cluster creation with GPU devices as PCI passthrough [speciali ClusterProxy: bootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, - Flavor: "pci", + Flavor: testSpecificSettingsGetter().FlavorForMode("pci"), Namespace: namespace.Name, ClusterName: clusterName, KubernetesVersion: e2eConfig.GetVariable(KubernetesVersion), diff --git a/test/e2e/hardware_upgrade_test.go b/test/e2e/hardware_upgrade_test.go index e19bf14f5b..c65617468d 100644 --- a/test/e2e/hardware_upgrade_test.go +++ b/test/e2e/hardware_upgrade_test.go @@ -44,7 +44,7 @@ type HardwareUpgradeSpecInput struct { var _ = Describe("Hardware version upgrade", func() { const specName = "hw-upgrade" - Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { + Setup(specName, 
func(testSpecificSettingsGetter func() testSettings) { var ( namespace *corev1.Namespace ) @@ -73,7 +73,7 @@ var _ = Describe("Hardware version upgrade", func() { }, Global: GlobalInput{ BootstrapClusterProxy: bootstrapClusterProxy, - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, E2EConfig: e2eConfig, ArtifactFolder: artifactFolder, }, diff --git a/test/e2e/ipam_test.go b/test/e2e/ipam_test.go index 440b8f1e28..85372cfccd 100644 --- a/test/e2e/ipam_test.go +++ b/test/e2e/ipam_test.go @@ -24,15 +24,16 @@ import ( var _ = Describe("ClusterClass Creation using Cluster API quick-start test and IPAM Provider [ClusterClass]", func() { const specName = "ipam-cluster-class" - Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { return capi_e2e.QuickStartSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - Flavor: ptr.To("ipam"), + Flavor: ptr.To(testSpecificSettingsGetter().FlavorForMode("ipam")), + PostNamespaceCreated: testSpecificSettingsGetter().PostNamespaceCreatedFunc, ControlPlaneMachineCount: ptr.To[int64](1), WorkerMachineCount: ptr.To[int64](1), } diff --git a/test/e2e/k8s_conformance_test.go b/test/e2e/k8s_conformance_test.go index d6061f662b..cd5ab9c0e7 100644 --- a/test/e2e/k8s_conformance_test.go +++ b/test/e2e/k8s_conformance_test.go @@ -28,15 +28,16 @@ import ( var _ = Describe("When testing K8S conformance [Conformance] [K8s-Install]", func() { // Note: This installs a cluster based on KUBERNETES_VERSION and runs conformance tests. 
const specName = "k8s-conformance" // copied from CAPI - Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { capi_e2e.K8SConformanceSpec(ctx, func() capi_e2e.K8SConformanceSpecInput { return capi_e2e.K8SConformanceSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - Flavor: "conformance", + Flavor: testSpecificSettingsGetter().FlavorForMode("conformance"), + PostNamespaceCreated: testSpecificSettingsGetter().PostNamespaceCreatedFunc, } }) }) @@ -48,20 +49,21 @@ var _ = Describe("When testing K8S conformance with K8S latest ci [Conformance] // KUBERNETES_VERSION env var. This only works without side effects on other tests because we are // running this test in its separate job. const specName = "k8s-conformance-ci-latest" // prefix copied from CAPI - Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { capi_e2e.K8SConformanceSpec(ctx, func() capi_e2e.K8SConformanceSpecInput { kubernetesVersion, err := kubernetesversions.ResolveVersion(ctx, e2eConfig.GetVariable("KUBERNETES_VERSION_LATEST_CI")) Expect(err).NotTo(HaveOccurred()) Expect(os.Setenv("KUBERNETES_VERSION", kubernetesVersion)).To(Succeed()) return capi_e2e.K8SConformanceSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, // Note: install-on-bootstrap will install Kubernetes on bootstrap if the correct Kubernetes version // cannot be detected. 
This is required to install versions we don't have images for (e.g. ci/latest-1.30). - Flavor: "install-on-bootstrap", + Flavor: testSpecificSettingsGetter().FlavorForMode("install-on-bootstrap"), + PostNamespaceCreated: testSpecificSettingsGetter().PostNamespaceCreatedFunc, } }) }) diff --git a/test/e2e/md_scale_test.go b/test/e2e/md_scale_test.go index f62c183917..67f7811453 100644 --- a/test/e2e/md_scale_test.go +++ b/test/e2e/md_scale_test.go @@ -19,18 +19,21 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" ) var _ = Describe("When testing MachineDeployment scale out/in", func() { const specName = "md-scale" // copied from CAPI - Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { capi_e2e.MachineDeploymentScaleSpec(ctx, func() capi_e2e.MachineDeploymentScaleSpecInput { return capi_e2e.MachineDeploymentScaleSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, + Flavor: testSpecificSettingsGetter().FlavorForMode(clusterctl.DefaultFlavor), + PostNamespaceCreated: testSpecificSettingsGetter().PostNamespaceCreatedFunc, } }) }) diff --git a/test/e2e/node_drain_timeout_test.go b/test/e2e/node_drain_timeout_test.go index e070e680b5..be28fcc4c8 100644 --- a/test/e2e/node_drain_timeout_test.go +++ b/test/e2e/node_drain_timeout_test.go @@ -18,19 +18,22 @@ package e2e import ( . 
"github.com/onsi/ginkgo/v2" + "k8s.io/utils/ptr" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" ) var _ = Describe("When testing node drain timeout", func() { const specName = "node-drain" // copied from CAPI - Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { capi_e2e.NodeDrainTimeoutSpec(ctx, func() capi_e2e.NodeDrainTimeoutSpecInput { return capi_e2e.NodeDrainTimeoutSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, + Flavor: ptr.To(testSpecificSettingsGetter().FlavorForMode("node-drain")), + PostNamespaceCreated: testSpecificSettingsGetter().PostNamespaceCreatedFunc, } }) }) diff --git a/test/e2e/node_labeling_test.go b/test/e2e/node_labeling_test.go index 78e181ecb6..814b9e094d 100644 --- a/test/e2e/node_labeling_test.go +++ b/test/e2e/node_labeling_test.go @@ -39,7 +39,7 @@ type NodeLabelingSpecInput struct { var _ = Describe("Label nodes with ESXi host info", func() { const specName = "node-labeling" - Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { var ( namespace *corev1.Namespace ) @@ -64,7 +64,7 @@ var _ = Describe("Label nodes with ESXi host info", func() { }, Global: GlobalInput{ BootstrapClusterProxy: bootstrapClusterProxy, - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, E2EConfig: e2eConfig, ArtifactFolder: artifactFolder, }, diff --git a/test/e2e/ownerrefs_finalizers_test.go b/test/e2e/ownerrefs_finalizers_test.go index f0a3942e44..4f39c8e4be 100644 --- a/test/e2e/ownerrefs_finalizers_test.go +++ b/test/e2e/ownerrefs_finalizers_test.go @@ 
-43,7 +43,7 @@ import ( var _ = Describe("Ensure OwnerReferences and Finalizers are resilient with FailureDomains and ClusterIdentity", func() { const specName = "owner-reference" - Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { // Before running the test create the secret used by the VSphereClusterIdentity to connect to the vCenter. BeforeEach(func() { createVsphereIdentitySecret(ctx, bootstrapClusterProxy) @@ -52,11 +52,12 @@ var _ = Describe("Ensure OwnerReferences and Finalizers are resilient with Failu capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { return capi_e2e.QuickStartSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - Flavor: ptr.To("ownerrefs-finalizers"), + Flavor: ptr.To(testSpecificSettingsGetter().FlavorForMode("ownerrefs-finalizers")), + PostNamespaceCreated: testSpecificSettingsGetter().PostNamespaceCreatedFunc, PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) { // Inject a client to use for checkClusterIdentitySecretOwnerRef checkClusterIdentitySecretOwnerRef(ctx, proxy.GetClient()) diff --git a/test/e2e/quick_start_test.go b/test/e2e/quick_start_test.go index c4b52f097e..3240a3a42f 100644 --- a/test/e2e/quick_start_test.go +++ b/test/e2e/quick_start_test.go @@ -20,34 +20,38 @@ import ( . 
"github.com/onsi/ginkgo/v2" "k8s.io/utils/ptr" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" ) -var _ = Describe("Cluster Creation using Cluster API quick-start test", func() { +var _ = Describe("Cluster Creation using Cluster API quick-start test [vcsim] [supervisor]", func() { const specName = "quick-start" // copied from CAPI - Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { return capi_e2e.QuickStartSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, + Flavor: ptr.To(testSpecificSettingsGetter().FlavorForMode(clusterctl.DefaultFlavor)), + PostNamespaceCreated: testSpecificSettingsGetter().PostNamespaceCreatedFunc, } }) }) }) -var _ = Describe("ClusterClass Creation using Cluster API quick-start test [PR-Blocking] [ClusterClass]", func() { +var _ = Describe("ClusterClass Creation using Cluster API quick-start test [PR-Blocking] [ClusterClass] [vcsim] [supervisor]", func() { const specName = "quick-start-cluster-class" // prefix (quick-start) copied from CAPI - Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { return capi_e2e.QuickStartSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - Flavor: ptr.To("topology"), + Flavor: 
ptr.To(testSpecificSettingsGetter().FlavorForMode("topology")), + PostNamespaceCreated: testSpecificSettingsGetter().PostNamespaceCreatedFunc, } }) }) @@ -55,48 +59,16 @@ var _ = Describe("ClusterClass Creation using Cluster API quick-start test [PR-B var _ = Describe("Cluster creation with [Ignition] bootstrap [PR-Blocking]", func() { const specName = "quick-start-ignition" // prefix (quick-start) copied from CAPI - Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { return capi_e2e.QuickStartSpecInput{ E2EConfig: e2eConfig, - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - Flavor: ptr.To("ignition"), - } - }) - }) -}) - -var _ = Describe("Cluster Creation using Cluster API quick-start test on vcsim [vcsim]", func() { - const specName = "quick-start-vcsim" // prefix (quick-start) copied from CAPI - Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { - capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { - return capi_e2e.QuickStartSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - InfrastructureProvider: ptr.To("vsphere"), - } - }) - }) -}) - -var _ = Describe("ClusterClass Creation using Cluster API quick-start test on vcsim [vcsim] [ClusterClass]", func() { - const specName = "quick-start-cluster-class-vcsim" // prefix (quick-start) copied from CAPI - Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { - capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { - return 
capi_e2e.QuickStartSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - Flavor: ptr.To("topology"), - InfrastructureProvider: ptr.To("vsphere"), + Flavor: ptr.To(testSpecificSettingsGetter().FlavorForMode("ignition")), + PostNamespaceCreated: testSpecificSettingsGetter().PostNamespaceCreatedFunc, } }) }) diff --git a/test/e2e/storage_policy_test.go b/test/e2e/storage_policy_test.go index a1a0839f56..6e341e79bb 100644 --- a/test/e2e/storage_policy_test.go +++ b/test/e2e/storage_policy_test.go @@ -45,7 +45,7 @@ type StoragePolicySpecInput struct { var _ = Describe("Cluster creation with storage policy", func() { const specName = "storage-policy" - Setup(specName, func(testSpecificClusterctlConfigPathGetter func() string) { + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { var namespace *corev1.Namespace BeforeEach(func() { @@ -69,7 +69,7 @@ var _ = Describe("Cluster creation with storage policy", func() { }, Global: GlobalInput{ BootstrapClusterProxy: bootstrapClusterProxy, - ClusterctlConfigPath: testSpecificClusterctlConfigPathGetter(), + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, E2EConfig: e2eConfig, ArtifactFolder: artifactFolder, }, diff --git a/test/framework/framework.go b/test/framework/framework.go index 729151c851..2f20ca26f7 100644 --- a/test/framework/framework.go +++ b/test/framework/framework.go @@ -23,23 +23,116 @@ import ( "fmt" "os" "path/filepath" + "strings" + . "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/runtime" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/bootstrap" "sigs.k8s.io/cluster-api/test/framework/clusterctl" + . 
"sigs.k8s.io/cluster-api/test/framework/ginkgoextensions" + "sigs.k8s.io/yaml" ) type ProviderConfig clusterctl.ProviderConfig // Util functions to interact with the clusterctl e2e framework. -func LoadE2EConfig(ctx context.Context, configPath string) (*clusterctl.E2EConfig, error) { +type configOverrides struct { + Variables map[string]string `json:"variables,omitempty"` + Intervals map[string][]string `json:"intervals,omitempty"` +} + +func LoadE2EConfig(ctx context.Context, configPath string, configOverridesPath, testTarget, testMode string) (*clusterctl.E2EConfig, error) { config := clusterctl.LoadE2EConfig(ctx, clusterctl.LoadE2EConfigInput{ConfigPath: configPath}) if config == nil { return nil, fmt.Errorf("cannot load E2E config found at %s", configPath) } + + // If defined, load configOverrides. + // This can be used e.g. when working with a custom vCenter server for local testing (instead of the one in VMC used in CI). + if configOverridesPath != "" { + Expect(configOverridesPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config-overrides should be an existing file.") + + Byf("Merging with e2e config overrides from %q", configOverridesPath) + configData, err := os.ReadFile(configOverridesPath) //nolint:gosec + Expect(err).ToNot(HaveOccurred(), "Failed to read e2e config overrides") + Expect(configData).ToNot(BeEmpty(), "The e2e config overrides should not be empty") + + configOverrides := &configOverrides{} + Expect(yaml.Unmarshal(configData, configOverrides)).To(Succeed(), "Failed to convert e2e config overrides to yaml") + + for k, v := range configOverrides.Variables { + config.Variables[k] = v + } + for k, v := range configOverrides.Intervals { + config.Intervals[k] = v + } + } + + if testTarget == "vcenter" { + // In case we are not testing vcsim, then drop the vcsim controller from providers and images. + // This ensures that all the tests not yet allowing to explicitly set vsphere as target infra provider keep working. 
+ Byf("Dropping vcsim provider from the e2e config") + for i := range config.Providers { + if config.Providers[i].Name == "vcsim" { + config.Providers = append(config.Providers[:i], config.Providers[i+1:]...) + break + } + } + + for i := range config.Images { + if strings.Contains(config.Images[i].Name, "cluster-api-vcsim-controller") { + config.Images = append(config.Images[:i], config.Images[i+1:]...) + break + } + } + } else { + // In case we are testing with vcsim, then drop the in-cluster ipam provider from providers and images. + Byf("Dropping in-cluster provider from the e2e config") + for i := range config.Providers { + if config.Providers[i].Name == "in-cluster" { + config.Providers = append(config.Providers[:i], config.Providers[i+1:]...) + break + } + } + + for i := range config.Images { + if strings.Contains(config.Images[i].Name, "cluster-api-ipam-in-cluster-controller") { + config.Images = append(config.Images[:i], config.Images[i+1:]...) + break + } + } + } + + if testMode == "govmomi" { + // In case we are not testing supervisor, then drop the vm-operator controller from providers and images. + Byf("Dropping vm-operator from the e2e config") + for i := range config.Providers { + if config.Providers[i].Name == "vm-operator" { + config.Providers = append(config.Providers[:i], config.Providers[i+1:]...) + break + } + } + + for i := range config.Images { + if strings.Contains(config.Images[i].Name, "vm-operator") { + config.Images = append(config.Images[:i], config.Images[i+1:]...) 
+ break + } + } + } else { + // In case we are testing supervisor, change the folder we build manifest from + Byf("Overriding source folder for vsphere provider to /config/supervisor in the e2e config") + for i := range config.Providers { + if config.Providers[i].Name == "vsphere" { + config.Providers[i].Versions[0].Value = strings.ReplaceAll(config.Providers[i].Versions[0].Value, "/config/default", "/config/supervisor") + break + } + } + } + return config, nil } @@ -91,11 +184,12 @@ func SetupBootstrapCluster(ctx context.Context, config *clusterctl.E2EConfig, sc func InitBootstrapCluster(ctx context.Context, bootstrapClusterProxy framework.ClusterProxy, config *clusterctl.E2EConfig, clusterctlConfig, artifactFolder string) { clusterctl.InitManagementClusterAndWatchControllerLogs(ctx, clusterctl.InitManagementClusterAndWatchControllerLogsInput{ - ClusterProxy: bootstrapClusterProxy, - ClusterctlConfigPath: clusterctlConfig, - InfrastructureProviders: config.InfrastructureProviders(), - LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), - IPAMProviders: config.IPAMProviders(), + ClusterProxy: bootstrapClusterProxy, + ClusterctlConfigPath: clusterctlConfig, + InfrastructureProviders: config.InfrastructureProviders(), + LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), + IPAMProviders: config.IPAMProviders(), + RuntimeExtensionProviders: config.RuntimeExtensionProviders(), }, config.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers")...) } diff --git a/test/framework/ip/vcsim.go b/test/framework/ip/vcsim.go index 02a69b8a08..117b1ede44 100644 --- a/test/framework/ip/vcsim.go +++ b/test/framework/ip/vcsim.go @@ -66,7 +66,8 @@ func (h *vcsim) ClaimIPs(ctx context.Context, opts ...ClaimOption) (AddressClaim ipAddressClaims := AddressClaims{} // Claim an IP per variable. 
- for _, variable := range append(options.additionalIPVariableNames, ControlPlaneEndpointIPVariable) { + // NOTE: the code calling this method assumes ControlPlaneEndpointIP is the first claim in the list. + for _, variable := range append([]string{ControlPlaneEndpointIPVariable}, options.additionalIPVariableNames...) { ip, port, ipAddressClaim, err := h.claimIPAddress(ctx) Expect(err).ToNot(HaveOccurred()) ipAddressClaims = append(ipAddressClaims, AddressClaim{ diff --git a/test/framework/vmoperator/vmoperator.go b/test/framework/vmoperator/vmoperator.go index 39cfc83225..c5940234a3 100644 --- a/test/framework/vmoperator/vmoperator.go +++ b/test/framework/vmoperator/vmoperator.go @@ -42,6 +42,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/session" + vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" ) const DefaultNamespace = "vmware-system-vmop" @@ -65,108 +66,68 @@ const ( caFilePathKey = "CAFilePath" ) -type ContentLibraryItemFilesConfig struct { - Name string - Content []byte -} - -type ContentLibraryItemConfig struct { - Name string - Files []ContentLibraryItemFilesConfig - ItemType string - ProductInfo string - OSInfo string -} - -type ContentLibraryConfig struct { - Name string - Datastore string - Item ContentLibraryItemConfig -} - -type VCenterClusterConfig struct { - ServerURL string - Username string - Password string - Thumbprint string - - // supervisor is based on a single vCenter cluster - Datacenter string - Cluster string - Folder string - ResourcePool string - StoragePolicy string - ContentLibrary ContentLibraryConfig -} - -type UserNamespaceConfig struct { - Name string - StorageClass string - VirtualMachineClass string -} - -// Dependencies models dependencies for the vm-operator. 
-type Dependencies struct { - // This is the namespace where is deployed the vm-operator - Namespace string - - // Info about the vCenter cluster the vm-operator is bound to - VCenterCluster VCenterClusterConfig - - // Info about where the users are expected to store Cluster API clusters to be managed by the vm-operator - UserNamespace UserNamespaceConfig -} - -func (d *Dependencies) Variables() map[string]string { - return map[string]string{ - "VSPHERE_STORAGE_POLICY": d.VCenterCluster.StoragePolicy, - "VSPHERE_IMAGE_NAME": d.VCenterCluster.ContentLibrary.Item.Name, - "VSPHERE_STORAGE_CLASS": d.UserNamespace.StorageClass, - "VSPHERE_MACHINE_CLASS_NAME": d.UserNamespace.VirtualMachineClass, - } -} - // ReconcileDependencies reconciles dependencies for the vm-operator. // NOTE: This func is idempotent, it creates objects if missing otherwise it uses existing ones // (this will allow e.g. to update images once and re-use for many test run). -func ReconcileDependencies(ctx context.Context, c client.Client, config *Dependencies) error { +func ReconcileDependencies(ctx context.Context, c client.Client, dependenciesConfig *vcsimv1.VMOperatorDependencies) error { log := ctrl.LoggerFrom(ctx) log.Info("Reconciling dependencies for the VMOperator Deployment") + config := dependenciesConfig.DeepCopy() + + // If we are using a VCenterSimulator, read it build a config.Spec.VCenter for it (so the code below can assume Spec.VCenter is always set). + // Also, add default storage and vm class for vcsim in not otherwise specified. 
+ if config.Spec.VCenterSimulatorRef != nil { + vCenterSimulator := &vcsimv1.VCenterSimulator{} + if err := c.Get(ctx, client.ObjectKey{ + Namespace: config.Spec.VCenterSimulatorRef.Namespace, + Name: config.Spec.VCenterSimulatorRef.Name, + }, vCenterSimulator); err != nil { + return errors.Wrapf(err, "failed to get vCenterSimulator %s", klog.KRef(config.Spec.VCenterSimulatorRef.Namespace, config.Spec.VCenterSimulatorRef.Name)) + } + + config.SetVCenterFromVCenterSimulator(vCenterSimulator) + } + + // default the OperatorRef if not specified. + if config.Spec.OperatorRef == nil { + config.Spec.OperatorRef = &vcsimv1.VMOperatorRef{Namespace: DefaultNamespace} + } + // Get a Client to VCenter and get holds on the relevant objects that should already exist params := session.NewParams(). - WithServer(config.VCenterCluster.ServerURL). - WithThumbprint(config.VCenterCluster.Thumbprint). - WithUserInfo(config.VCenterCluster.Username, config.VCenterCluster.Password) + WithServer(config.Spec.VCenter.ServerURL). + WithThumbprint(config.Spec.VCenter.Thumbprint). 
+ WithUserInfo(config.Spec.VCenter.Username, config.Spec.VCenter.Password) s, err := session.GetOrCreate(ctx, params) if err != nil { return errors.Wrapf(err, "failed to connect to vCenter Server instance to read dependency references") } - datacenter, err := s.Finder.Datacenter(ctx, config.VCenterCluster.Datacenter) + datacenter, err := s.Finder.Datacenter(ctx, config.Spec.VCenter.Datacenter) if err != nil { - return errors.Wrapf(err, "failed to get datacenter %s", config.VCenterCluster.Datacenter) + return errors.Wrapf(err, "failed to get datacenter %s", config.Spec.VCenter.Datacenter) } - cluster, err := s.Finder.ClusterComputeResource(ctx, config.VCenterCluster.Cluster) + cluster, err := s.Finder.ClusterComputeResource(ctx, config.Spec.VCenter.Cluster) if err != nil { - return errors.Wrapf(err, "failed to get cluster %s", config.VCenterCluster.Cluster) + return errors.Wrapf(err, "failed to get cluster %s", config.Spec.VCenter.Cluster) } - folder, err := s.Finder.Folder(ctx, config.VCenterCluster.Folder) + folder, err := s.Finder.Folder(ctx, config.Spec.VCenter.Folder) if err != nil { - return errors.Wrapf(err, "failed to get folder %s", config.VCenterCluster.Folder) + return errors.Wrapf(err, "failed to get folder %s", config.Spec.VCenter.Folder) } - resourcePool, err := s.Finder.ResourcePool(ctx, config.VCenterCluster.ResourcePool) + resourcePool, err := s.Finder.ResourcePool(ctx, config.Spec.VCenter.ResourcePool) if err != nil { - return errors.Wrapf(err, "failed to get resourcePool %s", config.VCenterCluster.ResourcePool) + return errors.Wrapf(err, "failed to get resourcePool %s", config.Spec.VCenter.ResourcePool) } - contentLibraryDatastore, err := s.Finder.Datastore(ctx, config.VCenterCluster.ContentLibrary.Datastore) + contentLibraryDatastore, err := s.Finder.Datastore(ctx, config.Spec.VCenter.ContentLibrary.Datastore) if err != nil { - return errors.Wrapf(err, "failed to get contentLibraryDatastore %s", config.VCenterCluster.ContentLibrary.Datastore) + 
return errors.Wrapf(err, "failed to get contentLibraryDatastore %s", config.Spec.VCenter.ContentLibrary.Datastore) } pbmClient, err := pbm.NewClient(ctx, s.Client.Client) @@ -174,60 +135,60 @@ func ReconcileDependencies(ctx context.Context, c client.Client, config *Depende return errors.Wrap(err, "failed to get storage policy client") } - storagePolicyID, err := pbmClient.ProfileIDByName(ctx, config.VCenterCluster.StoragePolicy) + storagePolicyID, err := pbmClient.ProfileIDByName(ctx, config.Spec.VCenter.StoragePolicy) if err != nil { - return errors.Wrapf(err, "failed to get storage policy profile %s", config.VCenterCluster.StoragePolicy) + return errors.Wrapf(err, "failed to get storage policy profile %s", config.Spec.VCenter.StoragePolicy) } - // Create StorageClass & bind it to the user namespace via a ResourceQuota + // Create StorageClasses & bind them to the user namespace via a ResourceQuota // NOTE: vm-operator is using the ResourceQuota to figure out which StorageClass can be used from a namespace - // TODO: consider if we want to support more than one storage class - - storageClass := &storagev1.StorageClass{ - TypeMeta: metav1.TypeMeta{}, - ObjectMeta: metav1.ObjectMeta{ - Name: config.UserNamespace.StorageClass, - }, - Provisioner: "kubernetes.io/vsphere-volume", - Parameters: map[string]string{ - "storagePolicyID": storagePolicyID, - }, - } - - if err := c.Get(ctx, client.ObjectKeyFromObject(storageClass), storageClass); err != nil { - if !apierrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to get vm-operator StorageClass %s", storageClass.Name) + for _, sc := range config.Spec.StorageClasses { + storageClass := &storagev1.StorageClass{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: sc, + }, + Provisioner: "kubernetes.io/vsphere-volume", + Parameters: map[string]string{ + "storagePolicyID": storagePolicyID, + }, } - if err := c.Create(ctx, storageClass); err != nil { - return errors.Wrapf(err, "failed to create 
vm-operator StorageClass %s", storageClass.Name) + if err := c.Get(ctx, client.ObjectKeyFromObject(storageClass), storageClass); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to get vm-operator StorageClass %s", storageClass.Name) + } + + if err := c.Create(ctx, storageClass); err != nil { + return errors.Wrapf(err, "failed to create vm-operator StorageClass %s", storageClass.Name) + } + log.Info("Created vm-operator StorageClass", "StorageClass", klog.KObj(storageClass)) } - log.Info("Created vm-operator StorageClass", "StorageClass", klog.KObj(storageClass)) - } - // TODO: rethink about this, for now we are creating a ResourceQuota with the same name of the StorageClass, might be this is not ok when hooking into a real vCenter - resourceQuota := &corev1.ResourceQuota{ - TypeMeta: metav1.TypeMeta{}, - ObjectMeta: metav1.ObjectMeta{ - Name: config.UserNamespace.StorageClass, - Namespace: config.UserNamespace.Name, - }, - Spec: corev1.ResourceQuotaSpec{ - Hard: map[corev1.ResourceName]resource.Quantity{ - corev1.ResourceName(fmt.Sprintf("%s.storageclass.storage.k8s.io/requests.storage", storageClass.Name)): resource.MustParse("1Gi"), + // TODO: rethink about this, for now we are creating a ResourceQuota with the same name of the StorageClass, might be this is not ok when hooking into a real vCenter + resourceQuota := &corev1.ResourceQuota{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: sc, + Namespace: config.Namespace, + }, + Spec: corev1.ResourceQuotaSpec{ + Hard: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceName(fmt.Sprintf("%s.storageclass.storage.k8s.io/requests.storage", storageClass.Name)): resource.MustParse("1Gi"), + }, }, - }, - } - - if err := c.Get(ctx, client.ObjectKeyFromObject(resourceQuota), resourceQuota); err != nil { - if !apierrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to get vm-operator ResourceQuota %s", resourceQuota.Name) } - if err := c.Create(ctx, 
resourceQuota); err != nil { - return errors.Wrapf(err, "failed to create vm-operator ResourceQuota %s", resourceQuota.Name) + if err := c.Get(ctx, client.ObjectKeyFromObject(resourceQuota), resourceQuota); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to get vm-operator ResourceQuota %s", resourceQuota.Name) + } + + if err := c.Create(ctx, resourceQuota); err != nil { + return errors.Wrapf(err, "failed to create vm-operator ResourceQuota %s", resourceQuota.Name) + } + log.Info("Created vm-operator ResourceQuota", "ResourceQuota", klog.KObj(resourceQuota)) } - log.Info("Created vm-operator ResourceQuota", "ResourceQuota", klog.KObj(resourceQuota)) } // Create Availability zones CR in K8s and bind them to the user namespace @@ -236,12 +197,12 @@ func ReconcileDependencies(ctx context.Context, c client.Client, config *Depende availabilityZone := &topologyv1.AvailabilityZone{ ObjectMeta: metav1.ObjectMeta{ - Name: strings.ReplaceAll(strings.ReplaceAll(strings.ToLower(strings.TrimPrefix(config.VCenterCluster.Cluster, "/")), "_", "-"), "/", "-"), + Name: strings.ReplaceAll(strings.ReplaceAll(strings.ToLower(strings.TrimPrefix(config.Spec.VCenter.Cluster, "/")), "_", "-"), "/", "-"), }, Spec: topologyv1.AvailabilityZoneSpec{ ClusterComputeResourceMoId: cluster.Reference().Value, Namespaces: map[string]topologyv1.NamespaceInfo{ - config.UserNamespace.Name: { + config.Namespace: { PoolMoId: resourcePool.Reference().Value, FolderMoId: folder.Reference().Value, }, @@ -260,16 +221,27 @@ func ReconcileDependencies(ctx context.Context, c client.Client, config *Depende log.Info("Created vm-operator AvailabilityZone", "AvailabilityZone", klog.KObj(availabilityZone)) } + if _, ok := availabilityZone.Spec.Namespaces[config.Namespace]; !ok { + availabilityZone.Spec.Namespaces[config.Namespace] = topologyv1.NamespaceInfo{ + PoolMoId: resourcePool.Reference().Value, + FolderMoId: folder.Reference().Value, + } + if err := c.Update(ctx, 
availabilityZone); err != nil { + return errors.Wrapf(err, "failed to update AvailabilityZone %s", availabilityZone.Name) + } + log.Info("Update vm-operator AvailabilityZone", "AvailabilityZone", klog.KObj(availabilityZone)) + } + // Create vm-operator Secret in K8s // This secret contains credentials to access vCenter the vm-operator acts on. secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: providerConfigMapName, // using the same name of the config map for consistency. - Namespace: config.Namespace, + Namespace: config.Spec.OperatorRef.Namespace, }, Data: map[string][]byte{ - "username": []byte(config.VCenterCluster.Username), - "password": []byte(config.VCenterCluster.Password), + "username": []byte(config.Spec.VCenter.Username), + "password": []byte(config.Spec.VCenter.Password), }, Type: corev1.SecretTypeOpaque, } @@ -286,15 +258,15 @@ func ReconcileDependencies(ctx context.Context, c client.Client, config *Depende // Create vm-operator ConfigMap in K8s // This ConfigMap contains settings for the vm-operator instance. - host, port, err := net.SplitHostPort(config.VCenterCluster.ServerURL) + host, port, err := net.SplitHostPort(config.Spec.VCenter.ServerURL) if err != nil { - return errors.Wrapf(err, "failed to split host %s", config.VCenterCluster.ServerURL) + return errors.Wrapf(err, "failed to split host %s", config.Spec.VCenter.ServerURL) } providerConfigMap := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: providerConfigMapName, - Namespace: config.Namespace, + Namespace: config.Spec.OperatorRef.Namespace, }, Data: map[string]string{ caFilePathKey: "", // Leaving this empty because we don't have (yet) a solution to inject a CA file into the vm-operator pod. 
@@ -322,47 +294,49 @@ func ReconcileDependencies(ctx context.Context, c client.Client, config *Depende } // Create VirtualMachineClass in K8s and bind it to the user namespace - // TODO: figure out if to add more vm classes / if to make them configurable via config - vmClass := &vmoprv1.VirtualMachineClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: config.UserNamespace.VirtualMachineClass, - }, - Spec: vmoprv1.VirtualMachineClassSpec{ - Hardware: vmoprv1.VirtualMachineClassHardware{ - Cpus: 8, - Memory: resource.MustParse("64G"), + for _, vmc := range config.Spec.VirtualMachineClasses { + vmClass := &vmoprv1.VirtualMachineClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: vmc, + }, + Spec: vmoprv1.VirtualMachineClassSpec{ + // TODO: figure out if to make vm class configurable via API + Hardware: vmoprv1.VirtualMachineClassHardware{ + Cpus: 2, + Memory: resource.MustParse("4G"), + }, }, - }, - } - if err := c.Get(ctx, client.ObjectKeyFromObject(vmClass), vmClass); err != nil { - if !apierrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to get vm-operator VirtualMachineClass %s", vmClass.Name) } - if err := c.Create(ctx, vmClass); err != nil { - return errors.Wrapf(err, "failed to create vm-operator VirtualMachineClass %s", vmClass.Name) + if err := c.Get(ctx, client.ObjectKeyFromObject(vmClass), vmClass); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to get vm-operator VirtualMachineClass %s", vmClass.Name) + } + if err := c.Create(ctx, vmClass); err != nil { + return errors.Wrapf(err, "failed to create vm-operator VirtualMachineClass %s", vmClass.Name) + } + log.Info("Created vm-operator VirtualMachineClass", "VirtualMachineClass", klog.KObj(vmClass)) } - log.Info("Created vm-operator VirtualMachineClass", "VirtualMachineClass", klog.KObj(vmClass)) - } - vmClassBinding := &vmoprv1.VirtualMachineClassBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: vmClass.Name, - Namespace: config.UserNamespace.Name, - }, - ClassRef: 
vmoprv1.ClassReference{ - APIVersion: vmoprv1.SchemeGroupVersion.String(), - Kind: "VirtualMachineClass", - Name: vmClass.Name, - }, - } - if err := c.Get(ctx, client.ObjectKeyFromObject(vmClassBinding), vmClassBinding); err != nil { - if !apierrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to get vm-operator VirtualMachineClassBinding %s", vmClassBinding.Name) + vmClassBinding := &vmoprv1.VirtualMachineClassBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: vmClass.Name, + Namespace: config.Namespace, + }, + ClassRef: vmoprv1.ClassReference{ + APIVersion: vmoprv1.SchemeGroupVersion.String(), + Kind: "VirtualMachineClass", + Name: vmClass.Name, + }, } - if err := c.Create(ctx, vmClassBinding); err != nil { - return errors.Wrapf(err, "failed to create vm-operator VirtualMachineClassBinding %s", vmClassBinding.Name) + if err := c.Get(ctx, client.ObjectKeyFromObject(vmClassBinding), vmClassBinding); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to get vm-operator VirtualMachineClassBinding %s", vmClassBinding.Name) + } + if err := c.Create(ctx, vmClassBinding); err != nil { + return errors.Wrapf(err, "failed to create vm-operator VirtualMachineClassBinding %s", vmClassBinding.Name) + } + log.Info("Created vm-operator VirtualMachineClassBinding", "VirtualMachineClassBinding", klog.KObj(vmClassBinding)) } - log.Info("Created vm-operator VirtualMachineClassBinding", "VirtualMachineClassBinding", klog.KObj(vmClassBinding)) } // Create a ContentLibrary in K8s and in vCenter, bind it to the K8s namespace @@ -371,14 +345,14 @@ func ReconcileDependencies(ctx context.Context, c client.Client, config *Depende // - k8s: ContentLibraryProvider, ContentSource (both representing the library), a VirtualMachineImage (representing the Item) restClient := rest.NewClient(s.Client.Client) - if err := restClient.Login(ctx, url.UserPassword(config.VCenterCluster.Username, config.VCenterCluster.Password)); err != nil { + if err := 
restClient.Login(ctx, url.UserPassword(config.Spec.VCenter.Username, config.Spec.VCenter.Password)); err != nil { return errors.Wrap(err, "failed to login using the rest client") } libMgr := library.NewManager(restClient) contentLibrary := library.Library{ - Name: config.VCenterCluster.ContentLibrary.Name, + Name: config.Spec.VCenter.ContentLibrary.Name, Type: "LOCAL", Storage: []library.StorageBackings{ { @@ -435,7 +409,7 @@ func ReconcileDependencies(ctx context.Context, c client.Client, config *Depende contentSourceBinding := &vmoprv1.ContentSourceBinding{ ObjectMeta: metav1.ObjectMeta{ Name: contentLibraryID, - Namespace: config.UserNamespace.Name, + Namespace: config.Namespace, }, ContentSourceRef: vmoprv1.ContentSourceReference{ APIVersion: vmoprv1.SchemeGroupVersion.String(), @@ -475,111 +449,114 @@ func ReconcileDependencies(ctx context.Context, c client.Client, config *Depende log.Info("Created vm-operator ContentLibraryProvider", "ContentSource", klog.KObj(contentSource), "ContentLibraryProvider", klog.KObj(contentLibraryProvider)) } - libraryItem := library.Item{ - Name: config.VCenterCluster.ContentLibrary.Item.Name, - Type: config.VCenterCluster.ContentLibrary.Item.ItemType, - LibraryID: contentLibraryID, - } + for _, item := range config.Spec.VCenter.ContentLibrary.Items { + libraryItem := library.Item{ + Name: item.Name, + Type: item.ItemType, + LibraryID: contentLibraryID, + } - items, err := libMgr.GetLibraryItems(ctx, contentLibraryID) - if err != nil { - return errors.Wrap(err, "failed to get ContentLibraryItems") - } + items, err := libMgr.GetLibraryItems(ctx, contentLibraryID) + if err != nil { + return errors.Wrap(err, "failed to get ContentLibraryItems") + } - var libraryItemID string - for _, item := range items { - if item.Name == libraryItem.Name { - libraryItemID = item.ID - break + var libraryItemID string + for _, i := range items { + if i.Name == libraryItem.Name { + libraryItemID = i.ID + break + } } - } - if libraryItemID == "" { - 
id, err := libMgr.CreateLibraryItem(ctx, libraryItem) - if err != nil { - return errors.Wrapf(err, "failed to create vm-operator ContentLibraryItem %s", libraryItem.Name) + if libraryItemID == "" { + id, err := libMgr.CreateLibraryItem(ctx, libraryItem) + if err != nil { + return errors.Wrapf(err, "failed to create vm-operator ContentLibraryItem %s", libraryItem.Name) + } + log.Info("Created vm-operator LibraryItem in vCenter", "ContentLibrary", contentLibrary.Name, "LibraryItem", libraryItem.Name) + libraryItemID = id } - log.Info("Created vm-operator LibraryItem in vCenter", "ContentLibrary", contentLibrary.Name, "LibraryItem", libraryItem.Name) - libraryItemID = id - } - virtualMachineImage := &vmoprv1.VirtualMachineImage{ - ObjectMeta: metav1.ObjectMeta{ - Name: libraryItem.Name, - }, - Spec: vmoprv1.VirtualMachineImageSpec{ - ProductInfo: vmoprv1.VirtualMachineImageProductInfo{ - FullVersion: config.VCenterCluster.ContentLibrary.Item.ProductInfo, + virtualMachineImage := &vmoprv1.VirtualMachineImage{ + ObjectMeta: metav1.ObjectMeta{ + Name: libraryItem.Name, }, - OSInfo: vmoprv1.VirtualMachineImageOSInfo{ - Type: config.VCenterCluster.ContentLibrary.Item.OSInfo, + Spec: vmoprv1.VirtualMachineImageSpec{ + ProductInfo: vmoprv1.VirtualMachineImageProductInfo{ + FullVersion: item.ProductInfo, + }, + OSInfo: vmoprv1.VirtualMachineImageOSInfo{ + Type: item.OSInfo, + }, }, - }, - } - - if err := controllerutil.SetOwnerReference(contentLibraryProvider, virtualMachineImage, c.Scheme()); err != nil { - return errors.Wrap(err, "failed to set VirtualMachineImage owner") - } - if err := c.Get(ctx, client.ObjectKeyFromObject(virtualMachineImage), virtualMachineImage); err != nil { - if !apierrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to get vm-operator VirtualMachineImage %s", virtualMachineImage.Name) - } - if err := c.Create(ctx, virtualMachineImage); err != nil { - return errors.Wrapf(err, "failed to create vm-operator VirtualMachineImage %s", 
virtualMachineImage.Name) } - log.Info("Created vm-operator VirtualMachineImage", "ContentSource", klog.KObj(contentSource), "ContentLibraryProvider", klog.KObj(contentLibraryProvider), "VirtualMachineImage", klog.KObj(virtualMachineImage)) - } - existingFiles, err := libMgr.ListLibraryItemFiles(ctx, libraryItemID) - if err != nil { - return errors.Wrapf(err, "failed to list files for vm-operator libraryItem %s", libraryItem.Name) - } - - uploadFunc := func(sessionID, file string, content []byte) error { - info := library.UpdateFile{ - Name: file, - SourceType: "PUSH", - Size: int64(len(content)), + if err := controllerutil.SetOwnerReference(contentLibraryProvider, virtualMachineImage, c.Scheme()); err != nil { + return errors.Wrap(err, "failed to set VirtualMachineImage owner") } - - update, err := libMgr.AddLibraryItemFile(ctx, sessionID, info) - if err != nil { - return err + if err := c.Get(ctx, client.ObjectKeyFromObject(virtualMachineImage), virtualMachineImage); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to get vm-operator VirtualMachineImage %s", virtualMachineImage.Name) + } + if err := c.Create(ctx, virtualMachineImage); err != nil { + return errors.Wrapf(err, "failed to create vm-operator VirtualMachineImage %s", virtualMachineImage.Name) + } + log.Info("Created vm-operator VirtualMachineImage", "ContentSource", klog.KObj(contentSource), "ContentLibraryProvider", klog.KObj(contentLibraryProvider), "VirtualMachineImage", klog.KObj(virtualMachineImage)) } - u, err := url.Parse(update.UploadEndpoint.URI) + existingFiles, err := libMgr.ListLibraryItemFiles(ctx, libraryItemID) if err != nil { - return err + return errors.Wrapf(err, "failed to list files for vm-operator libraryItem %s", libraryItem.Name) } - p := soap.DefaultUpload - p.ContentLength = info.Size + uploadFunc := func(sessionID, file string, content []byte) error { + info := library.UpdateFile{ + Name: file, + SourceType: "PUSH", + Size: int64(len(content)), 
+ } - return libMgr.Client.Upload(ctx, bytes.NewReader(content), u, &p) - } + update, err := libMgr.AddLibraryItemFile(ctx, sessionID, info) + if err != nil { + return err + } - for _, file := range config.VCenterCluster.ContentLibrary.Item.Files { - exists := false - for _, existingFile := range existingFiles { - if file.Name == existingFile.Name { - exists = true + u, err := url.Parse(update.UploadEndpoint.URI) + if err != nil { + return err } - } - if exists { - continue - } - sessionID, err := libMgr.CreateLibraryItemUpdateSession(ctx, library.Session{LibraryItemID: libraryItemID}) - if err != nil { - return errors.Wrapf(err, "failed to start update session for vm-operator libraryItem %s", libraryItem.Name) - } - if err := uploadFunc(sessionID, file.Name, file.Content); err != nil { - return errors.Wrapf(err, "failed to upload data for vm-operator libraryItem %s", libraryItem.Name) + p := soap.DefaultUpload + p.ContentLength = info.Size + + return libMgr.Client.Upload(ctx, bytes.NewReader(content), u, &p) } - if err := libMgr.CompleteLibraryItemUpdateSession(ctx, sessionID); err != nil { - return errors.Wrapf(err, "failed to complete update session for vm-operator libraryItem %s", libraryItem.Name) + + for _, file := range item.Files { + exists := false + for _, existingFile := range existingFiles { + if file.Name == existingFile.Name { + exists = true + } + } + if exists { + continue + } + + sessionID, err := libMgr.CreateLibraryItemUpdateSession(ctx, library.Session{LibraryItemID: libraryItemID}) + if err != nil { + return errors.Wrapf(err, "failed to start update session for vm-operator libraryItem %s", libraryItem.Name) + } + if err := uploadFunc(sessionID, file.Name, file.Content); err != nil { + return errors.Wrapf(err, "failed to upload data for vm-operator libraryItem %s", libraryItem.Name) + } + if err := libMgr.CompleteLibraryItemUpdateSession(ctx, sessionID); err != nil { + return errors.Wrapf(err, "failed to complete update session for vm-operator 
libraryItem %s", libraryItem.Name) + } + log.Info("Uploaded vm-operator LibraryItemFile in vCenter", "ContentLibrary", contentLibrary.Name, "libraryItem", libraryItem.Name, "LibraryItemFile", file.Name) } - log.Info("Uploaded vm-operator LibraryItemFile in vCenter", "ContentLibrary", contentLibrary.Name, "libraryItem", libraryItem.Name, "LibraryItemFile", file.Name) } + return nil } diff --git a/test/framework/vmoperator/vmoperator_test.go b/test/framework/vmoperator/vmoperator_test.go deleted file mode 100644 index 40435803d0..0000000000 --- a/test/framework/vmoperator/vmoperator_test.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package vmoperator - -import ( - "context" - "os" - "testing" - "time" - - . 
"github.com/onsi/gomega" - vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1" - topologyv1 "github.com/vmware-tanzu/vm-operator/external/tanzu-topology/api/v1alpha1" - corev1 "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/tools/clientcmd" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/yaml" - - vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" -) - -/* -cat << EOF > /tmp/testbed.yaml -ServerURL: "${VSPHERE_SERVER}:443" -Username: "${VSPHERE_USERNAME}" -Password: "${VSPHERE_PASSWORD}" -Thumbprint: "${VSPHERE_TLS_THUMBPRINT}" -Datacenter: "${VSPHERE_DATACENTER}" -Cluster: "/${VSPHERE_DATACENTER}/host/cluster0" -Folder: "${VSPHERE_FOLDER}" -ResourcePool: "/${VSPHERE_DATACENTER}/host/cluster0/Resources/${VSPHERE_RESOURCE_POOL}" -StoragePolicyID: "${VSPHERE_STORAGE_POLICY}" -ContentLibrary: - Name: "capv" - Datastore: "/${VSPHERE_DATACENTER}/datastore/${VSPHERE_DATASTORE}" -EOF. -*/ - -func Test_reconcileVMOperatorDeployment(t *testing.T) { - t.Skip() - - scheme := runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - _ = storagev1.AddToScheme(scheme) - _ = vmoprv1.AddToScheme(scheme) - _ = vmwarev1.AddToScheme(scheme) - _ = topologyv1.AddToScheme(scheme) - - const ( - kubeconfigPath = "/tmp/capi-test.kubeconfig" - testbedYamlPath = "/tmp/testbed.yaml" - ) - g := NewWithT(t) - - ctx := context.Background() - - vcenterClusterConfig := VCenterClusterConfig{} - testbedData, err := os.ReadFile(testbedYamlPath) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(yaml.Unmarshal(testbedData, &vcenterClusterConfig)).ToNot(HaveOccurred()) - - config := &Dependencies{ - Namespace: "vmware-system-vmop", - UserNamespace: UserNamespaceConfig{ - Name: "default", // namespace where we deploy a cluster - StorageClass: "tkg-shared-ds-sp", - }, - VCenterCluster: vcenterClusterConfig, - } - - config.VCenterCluster.ContentLibrary.Item = ContentLibraryItemConfig{ - Name: 
"ubuntu-2204-kube-v1.29.0", - } - - // create a config - - // Create a client.Client from a kubeconfig - kubeconfig, err := os.ReadFile(kubeconfigPath) - g.Expect(err).ToNot(HaveOccurred()) - - restConfig, err := clientcmd.RESTConfigFromKubeConfig(kubeconfig) - g.Expect(err).ToNot(HaveOccurred()) - - restConfig.Timeout = 10 * time.Second - - c, err := client.New(restConfig, client.Options{Scheme: scheme}) - g.Expect(err).ToNot(HaveOccurred()) - - // reconcile - err = ReconcileDependencies(ctx, c, config) - g.Expect(err).ToNot(HaveOccurred()) -} diff --git a/test/infrastructure/vcsim/api/v1alpha1/envvar_types.go b/test/infrastructure/vcsim/api/v1alpha1/envvar_types.go index 34257b7e3b..4a91181f73 100644 --- a/test/infrastructure/vcsim/api/v1alpha1/envvar_types.go +++ b/test/infrastructure/vcsim/api/v1alpha1/envvar_types.go @@ -17,18 +17,37 @@ limitations under the License. package v1alpha1 import ( - "fmt" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" - - vcsimhelpers "sigs.k8s.io/cluster-api-provider-vsphere/internal/test/helpers/vcsim" ) // EnvVarSpec defines the desired state of the EnvVar. type EnvVarSpec struct { - VCenterSimulator string `json:"vCenterSimulator,omitempty"` - Cluster ClusterEnvVarSpec `json:"cluster,omitempty"` + // Name of the VCenterSimulator instance to use as source for EnvVar values. + VCenterSimulator *NamespacedRef `json:"vCenterSimulator,omitempty"` + + // Name of the ControlPlaneEndpoint instance to use as source for EnvVar values. + ControlPlaneEndpoint NamespacedRef `json:"controlPlaneEndpoint,omitempty"` + + // Cluster specific values to use as source for EnvVar values. + Cluster ClusterEnvVarSpec `json:"cluster,omitempty"` + + // Name of the VMOperatorDependencies instance to use as source for EnvVar values. + // If not specified, a default dependenciesConfig that works for vcsim is used. 
+ // NOTE: this is required only for supervisor mode; also: + // - the system automatically picks the first StorageClass defined in the VMOperatorDependencies + // - the system automatically picks the first VirtualMachine class defined in the VMOperatorDependencies + // - the system automatically picks the first Image from the content library defined in the VMOperatorDependencies + VMOperatorDependencies *NamespacedRef `json:"vmOperatorDependencies,omitempty"` +} + +// NamespacedRef defines a reference to an object of a well known API Group and kind. +type NamespacedRef struct { + // Namespace of the referenced object. + // If empty, it defaults to the namespace of the parent object. + Namespace string `json:"namespace,omitempty"` + + // Name of the referenced object. + Name string `json:"name,omitempty"` } // ClusterEnvVarSpec defines the spec for the EnvVar generator targeting a specific Cluster API cluster. @@ -36,6 +55,9 @@ type ClusterEnvVarSpec struct { // The name of the Cluster API cluster. Name string `json:"name"` + // The namespace of the Cluster API cluster. + Namespace string `json:"namespace"` + // The Kubernetes version of the Cluster API cluster. // NOTE: This variable isn't related to the vcsim controller, but we are handling it here // in order to have a single point of control for all the variables related to a Cluster API template. @@ -103,39 +125,3 @@ type EnvVarList struct { func init() { objectTypes = append(objectTypes, &EnvVar{}, &EnvVarList{}) } - -func (c *ClusterEnvVarSpec) commonVariables() map[string]string { - return map[string]string{ - "VSPHERE_POWER_OFF_MODE": ptr.Deref(c.PowerOffMode, "trySoft"), - } -} - -// SupervisorVariables returns name/value pairs for a ClusterEnvVarSpec to be used for clusterctl templates when testing supervisor mode. 
-func (c *ClusterEnvVarSpec) SupervisorVariables() map[string]string { - return c.commonVariables() -} - -// GovmomiVariables returns name/value pairs for a ClusterEnvVarSpec to be used for clusterctl templates when testing govmomi mode. -func (c *ClusterEnvVarSpec) GovmomiVariables() map[string]string { - vars := c.commonVariables() - - datacenter := int(ptr.Deref(c.Datacenter, 0)) - datastore := int(ptr.Deref(c.Datastore, 0)) - cluster := int(ptr.Deref(c.Cluster, 0)) - - // Pick the template for the given Kubernetes version if any, otherwise the template for the latest - // version defined in the model. - template := vcsimhelpers.DefaultVMTemplates[len(vcsimhelpers.DefaultVMTemplates)-1] - if c.KubernetesVersion != nil { - template = fmt.Sprintf("ubuntu-2204-kube-%s", *c.KubernetesVersion) - } - - // NOTE: omitting cluster Name intentionally because E2E tests provide this value in other ways - vars["VSPHERE_DATACENTER"] = vcsimhelpers.DatacenterName(datacenter) - vars["VSPHERE_DATASTORE"] = vcsimhelpers.DatastoreName(datastore) - vars["VSPHERE_FOLDER"] = vcsimhelpers.VMFolderName(datacenter) - vars["VSPHERE_NETWORK"] = vcsimhelpers.NetworkPath(datacenter, vcsimhelpers.DefaultNetworkName) - vars["VSPHERE_RESOURCE_POOL"] = vcsimhelpers.ResourcePoolPath(datacenter, cluster) - vars["VSPHERE_TEMPLATE"] = vcsimhelpers.VMPath(datacenter, template) - return vars -} diff --git a/test/infrastructure/vcsim/api/v1alpha1/vcsim_types.go b/test/infrastructure/vcsim/api/v1alpha1/vcsim_types.go index 94f9423b75..8879f52c88 100644 --- a/test/infrastructure/vcsim/api/v1alpha1/vcsim_types.go +++ b/test/infrastructure/vcsim/api/v1alpha1/vcsim_types.go @@ -17,12 +17,7 @@ limitations under the License. 
package v1alpha1 import ( - "fmt" - "net" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - vcsimhelpers "sigs.k8s.io/cluster-api-provider-vsphere/internal/test/helpers/vcsim" ) const ( @@ -121,36 +116,3 @@ type VCenterSimulatorList struct { func init() { objectTypes = append(objectTypes, &VCenterSimulator{}, &VCenterSimulatorList{}) } - -func (v *VCenterSimulator) commonVariables() map[string]string { - host := v.Status.Host - - // NOTE: best effort reverting back to local host because the assumption is that the vcsim controller pod will be port-forwarded on local host - _, port, err := net.SplitHostPort(host) - if err == nil { - host = net.JoinHostPort("127.0.0.1", port) - } - - return map[string]string{ - "VSPHERE_PASSWORD": v.Status.Password, - "VSPHERE_USERNAME": v.Status.Username, - "VSPHERE_STORAGE_POLICY": vcsimhelpers.DefaultStoragePolicyName, - - // variables to set up govc for working with the vcsim instance. - "GOVC_URL": fmt.Sprintf("https://%s:%s@%s/sdk", v.Status.Username, v.Status.Password, host), - "GOVC_INSECURE": "true", - } -} - -// SupervisorVariables returns name/value pairs for a VCenterSimulator to be used for clusterctl templates when testing supervisor mode. -func (v *VCenterSimulator) SupervisorVariables() map[string]string { - return v.commonVariables() -} - -// GovmomiVariables returns name/value pairs for a VCenterSimulator to be used for clusterctl templates when testing govmomi mode. 
-func (v *VCenterSimulator) GovmomiVariables() map[string]string { - vars := v.commonVariables() - vars["VSPHERE_SERVER"] = fmt.Sprintf("https://%s", v.Status.Host) - vars["VSPHERE_TLS_THUMBPRINT"] = v.Status.Thumbprint - return vars -} diff --git a/test/infrastructure/vcsim/api/v1alpha1/vmoperatordependencies_types.go b/test/infrastructure/vcsim/api/v1alpha1/vmoperatordependencies_types.go new file mode 100644 index 0000000000..b40cf6a89d --- /dev/null +++ b/test/infrastructure/vcsim/api/v1alpha1/vmoperatordependencies_types.go @@ -0,0 +1,176 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + vcsimhelpers "sigs.k8s.io/cluster-api-provider-vsphere/internal/test/helpers/vcsim" + "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/controllers/images" +) + +// VMOperatorDependenciesSpec defines the desired state of the VMOperatorDependencies in +// the namespace where this object is created. +type VMOperatorDependenciesSpec struct { + // OperatorRef provides a reference to the running instance of vm-operator. + OperatorRef *VMOperatorRef `json:"operatorRef,omitempty"` + + // VCenter defines info about the vCenter instance that the vm-operator interacts with. + // Only one between this field and VCenterSimulatorRef must be set. 
+	VCenter *VCenterSpec `json:"vCenter,omitempty"`
+
+	// VCenterSimulatorRef defines info about the vCenter simulator instance that the vm-operator interacts with.
+	// Only one between this field and VCenter must be set.
+	VCenterSimulatorRef *NamespacedRef `json:"vCenterSimulatorRef,omitempty"`
+
+	// StorageClasses defines a list of StorageClasses to be bound to the namespace where this object is created.
+	StorageClasses []string `json:"storageClasses,omitempty"`
+
+	// VirtualMachineClasses defines a list of VirtualMachineClasses to be bound to the namespace where this object is created.
+	VirtualMachineClasses []string `json:"virtualMachineClasses,omitempty"`
+}
+
+// VMOperatorRef provides a reference to the running instance of vm-operator.
+type VMOperatorRef struct {
+	// Namespace where the vm-operator is running.
+	Namespace string `json:"namespace,omitempty"`
+}
+
+// VCenterSpec defines info about the vCenter instance that the vm-operator interacts with.
+type VCenterSpec struct {
+	ServerURL  string `json:"serverURL,omitempty"`
+	Username   string `json:"username,omitempty"`
+	Password   string `json:"password,omitempty"`
+	Thumbprint string `json:"thumbprint,omitempty"`
+
+	// supervisor is based on a single vCenter cluster
+	Datacenter     string               `json:"datacenter,omitempty"`
+	Cluster        string               `json:"cluster,omitempty"`
+	Folder         string               `json:"folder,omitempty"`
+	ResourcePool   string               `json:"resourcePool,omitempty"`
+	StoragePolicy  string               `json:"storagePolicy,omitempty"`
+	ContentLibrary ContentLibraryConfig `json:"contentLibrary,omitempty"`
+}
+
+type ContentLibraryItemFilesConfig struct {
+	Name    string `json:"name,omitempty"`
+	Content []byte `json:"content,omitempty"`
+	// TODO: ContentFrom a config map
+}
+
+type ContentLibraryItemConfig struct {
+	Name        string                          `json:"name,omitempty"`
+	Files       []ContentLibraryItemFilesConfig `json:"files,omitempty"`
+	ItemType    string                          `json:"itemType,omitempty"`
+	ProductInfo string                          `json:"productInfo,omitempty"`
+	OSInfo
string                          `json:"osInfo,omitempty"`
+}
+
+type ContentLibraryConfig struct {
+	Name      string                     `json:"name,omitempty"`
+	Datastore string                     `json:"datastore,omitempty"`
+	Items     []ContentLibraryItemConfig `json:"items,omitempty"`
+}
+
+// VMOperatorDependenciesStatus defines the observed state of the VMOperatorDependencies.
+type VMOperatorDependenciesStatus struct {
+	Ready bool `json:"ready,omitempty"`
+}
+
+// +kubebuilder:resource:path=vmoperatordependencies,scope=Namespaced,categories=cluster-api
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+// +kubebuilder:object:root=true
+
+// VMOperatorDependencies is the schema for a VM operator dependencies.
+type VMOperatorDependencies struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   VMOperatorDependenciesSpec   `json:"spec,omitempty"`
+	Status VMOperatorDependenciesStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// VMOperatorDependenciesList contains a list of VMOperatorDependencies.
+type VMOperatorDependenciesList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []VMOperatorDependencies `json:"items"`
+}
+
+func init() {
+	objectTypes = append(objectTypes, &VMOperatorDependencies{}, &VMOperatorDependenciesList{})
+}
+
+// SetVCenterFromVCenterSimulator sets config.Spec.VCenter for a given VCenterSimulator.
+// NOTE: by default it uses cluster DC0/C0, datastore LocalDS_0 in vcsim; it also sets up a
+// content library with the templates that are expected by test cluster classes.
+func (d *VMOperatorDependencies) SetVCenterFromVCenterSimulator(vCenterSimulator *VCenterSimulator) {
+	datacenter := 0
+	cluster := 0
+	datastore := 0
+
+	d.Spec.VCenter = &VCenterSpec{
+		ServerURL:     vCenterSimulator.Status.Host,
+		Username:      vCenterSimulator.Status.Username,
+		Password:      vCenterSimulator.Status.Password,
+		Thumbprint:    vCenterSimulator.Status.Thumbprint,
+		Datacenter:    vcsimhelpers.DatacenterName(datacenter),
+		Cluster:       vcsimhelpers.ClusterPath(datacenter, cluster),
+		Folder:        vcsimhelpers.VMFolderName(datacenter),
+		ResourcePool:  vcsimhelpers.ResourcePoolPath(datacenter, cluster),
+		StoragePolicy: vcsimhelpers.DefaultStoragePolicyName,
+		ContentLibrary: ContentLibraryConfig{
+			Name:      "vcsim",
+			Datastore: vcsimhelpers.DatastorePath(datacenter, datastore),
+			Items: []ContentLibraryItemConfig{
+				// Items are added right below this declaration
+			},
+		},
+	}
+
+	// Note: For the sake of testing with vcsim the template doesn't really matter (nor the version of K8s hosted on it)
+	// but we must provide at least the templates that are expected by test cluster classes.
+	for _, t := range vcsimhelpers.DefaultVMTemplates {
+		d.Spec.VCenter.ContentLibrary.Items = append(d.Spec.VCenter.ContentLibrary.Items,
+			ContentLibraryItemConfig{
+				Name: t,
+				Files: []ContentLibraryItemFilesConfig{
+					{
+						Name:    fmt.Sprintf("%s.ovf", t),
+						Content: images.SampleOVF,
+					},
+				},
+				ItemType:    "ovf",
+				ProductInfo: "dummy-productInfo",
+				OSInfo:      "dummy-OSInfo",
+			},
+		)
+	}
+
+	// Add default storage and vm class for vcsim if not otherwise specified.
+ if len(d.Spec.StorageClasses) == 0 { + d.Spec.StorageClasses = []string{"vcsim-default-storage-class"} + } + if len(d.Spec.VirtualMachineClasses) == 0 { + d.Spec.VirtualMachineClasses = []string{"vcsim-default-vm-class"} + } +} diff --git a/test/infrastructure/vcsim/api/v1alpha1/zz_generated.deepcopy.go b/test/infrastructure/vcsim/api/v1alpha1/zz_generated.deepcopy.go index 2b39f4ee7e..f0b32ccebe 100644 --- a/test/infrastructure/vcsim/api/v1alpha1/zz_generated.deepcopy.go +++ b/test/infrastructure/vcsim/api/v1alpha1/zz_generated.deepcopy.go @@ -74,6 +74,70 @@ func (in *ClusterEnvVarSpec) DeepCopy() *ClusterEnvVarSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentLibraryConfig) DeepCopyInto(out *ContentLibraryConfig) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ContentLibraryItemConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentLibraryConfig. +func (in *ContentLibraryConfig) DeepCopy() *ContentLibraryConfig { + if in == nil { + return nil + } + out := new(ContentLibraryConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentLibraryItemConfig) DeepCopyInto(out *ContentLibraryItemConfig) { + *out = *in + if in.Files != nil { + in, out := &in.Files, &out.Files + *out = make([]ContentLibraryItemFilesConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentLibraryItemConfig. 
+func (in *ContentLibraryItemConfig) DeepCopy() *ContentLibraryItemConfig { + if in == nil { + return nil + } + out := new(ContentLibraryItemConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentLibraryItemFilesConfig) DeepCopyInto(out *ContentLibraryItemFilesConfig) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = make([]byte, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentLibraryItemFilesConfig. +func (in *ContentLibraryItemFilesConfig) DeepCopy() *ContentLibraryItemFilesConfig { + if in == nil { + return nil + } + out := new(ContentLibraryItemFilesConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ControlPlaneEndpoint) DeepCopyInto(out *ControlPlaneEndpoint) { *out = *in @@ -225,7 +289,18 @@ func (in *EnvVarList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EnvVarSpec) DeepCopyInto(out *EnvVarSpec) { *out = *in + if in.VCenterSimulator != nil { + in, out := &in.VCenterSimulator, &out.VCenterSimulator + *out = new(NamespacedRef) + **out = **in + } + out.ControlPlaneEndpoint = in.ControlPlaneEndpoint in.Cluster.DeepCopyInto(&out.Cluster) + if in.VMOperatorDependencies != nil { + in, out := &in.VMOperatorDependencies, &out.VMOperatorDependencies + *out = new(NamespacedRef) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarSpec. @@ -260,6 +335,21 @@ func (in *EnvVarStatus) DeepCopy() *EnvVarStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *NamespacedRef) DeepCopyInto(out *NamespacedRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespacedRef. +func (in *NamespacedRef) DeepCopy() *NamespacedRef { + if in == nil { + return nil + } + out := new(NamespacedRef) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VCenterSimulator) DeepCopyInto(out *VCenterSimulator) { *out = *in @@ -398,3 +488,148 @@ func (in *VCenterSimulatorStatus) DeepCopy() *VCenterSimulatorStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VCenterSpec) DeepCopyInto(out *VCenterSpec) { + *out = *in + in.ContentLibrary.DeepCopyInto(&out.ContentLibrary) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VCenterSpec. +func (in *VCenterSpec) DeepCopy() *VCenterSpec { + if in == nil { + return nil + } + out := new(VCenterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VMOperatorDependencies) DeepCopyInto(out *VMOperatorDependencies) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMOperatorDependencies. +func (in *VMOperatorDependencies) DeepCopy() *VMOperatorDependencies { + if in == nil { + return nil + } + out := new(VMOperatorDependencies) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *VMOperatorDependencies) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VMOperatorDependenciesList) DeepCopyInto(out *VMOperatorDependenciesList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]VMOperatorDependencies, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMOperatorDependenciesList.
+func (in *VMOperatorDependenciesList) DeepCopy() *VMOperatorDependenciesList {
+	if in == nil {
+		return nil
+	}
+	out := new(VMOperatorDependenciesList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *VMOperatorDependenciesList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out.
+func (in *VMOperatorDependenciesSpec) DeepCopyInto(out *VMOperatorDependenciesSpec) { + *out = *in + if in.OperatorRef != nil { + in, out := &in.OperatorRef, &out.OperatorRef + *out = new(VMOperatorRef) + **out = **in + } + if in.VCenter != nil { + in, out := &in.VCenter, &out.VCenter + *out = new(VCenterSpec) + (*in).DeepCopyInto(*out) + } + if in.VCenterSimulatorRef != nil { + in, out := &in.VCenterSimulatorRef, &out.VCenterSimulatorRef + *out = new(NamespacedRef) + **out = **in + } + if in.StorageClasses != nil { + in, out := &in.StorageClasses, &out.StorageClasses + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.VirtualMachineClasses != nil { + in, out := &in.VirtualMachineClasses, &out.VirtualMachineClasses + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMOperatorDependenciesSpec. +func (in *VMOperatorDependenciesSpec) DeepCopy() *VMOperatorDependenciesSpec { + if in == nil { + return nil + } + out := new(VMOperatorDependenciesSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VMOperatorDependenciesStatus) DeepCopyInto(out *VMOperatorDependenciesStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMOperatorDependenciesStatus. +func (in *VMOperatorDependenciesStatus) DeepCopy() *VMOperatorDependenciesStatus { + if in == nil { + return nil + } + out := new(VMOperatorDependenciesStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VMOperatorRef) DeepCopyInto(out *VMOperatorRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMOperatorRef. 
+func (in *VMOperatorRef) DeepCopy() *VMOperatorRef { + if in == nil { + return nil + } + out := new(VMOperatorRef) + in.DeepCopyInto(out) + return out +} diff --git a/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_envvars.yaml b/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_envvars.yaml index a5d02c71d1..50c14c328e 100644 --- a/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_envvars.yaml +++ b/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_envvars.yaml @@ -37,8 +37,7 @@ spec: description: EnvVarSpec defines the desired state of the EnvVar. properties: cluster: - description: ClusterEnvVarSpec defines the spec for the EnvVar generator - targeting a specific Cluster API cluster. + description: Cluster specific values to use as source for EnvVar values. properties: cluster: description: 'Cluster specifies the VCenter Cluster for the Cluster @@ -73,6 +72,9 @@ spec: name: description: The name of the Cluster API cluster. type: string + namespace: + description: The namespace of the Cluster API cluster. + type: string powerOffMode: description: 'The PowerOffMode for the machines in the cluster. Default: trySoft' @@ -87,9 +89,50 @@ spec: type: integer required: - name + - namespace + type: object + controlPlaneEndpoint: + description: Name of the ControlPlaneEndpoint instance to use as source + for EnvVar values. + properties: + name: + description: Name of the referenced object. + type: string + namespace: + description: Namespace of the referenced object. If empty, it + defaults to the namespace of the parent object. + type: string type: object vCenterSimulator: - type: string + description: Name of the VCenterSimulator instance to use as source + for EnvVar values. + properties: + name: + description: Name of the referenced object. + type: string + namespace: + description: Namespace of the referenced object. 
If empty, it + defaults to the namespace of the parent object. + type: string + type: object + vmOperatorDependencies: + description: 'Name of the VMOperatorDependencies instance to use as + source for EnvVar values. If not specified, a default dependenciesConfig + that works for vcsim is used. NOTE: this is required only for supervisor + mode; also: - the system automatically picks the first StorageClass + defined in the VMOperatorDependencies - the system automatically + picks the first VirtualMachine class defined in the VMOperatorDependencies + - the system automatically picks the first Image from the content + library defined in the VMOperatorDependencies' + properties: + name: + description: Name of the referenced object. + type: string + namespace: + description: Namespace of the referenced object. If empty, it + defaults to the namespace of the parent object. + type: string + type: object type: object status: description: EnvVarStatus defines the observed state of the EnvVar. diff --git a/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_vmoperatordependencies.yaml b/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_vmoperatordependencies.yaml new file mode 100644 index 0000000000..76cba4eb61 --- /dev/null +++ b/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_vmoperatordependencies.yaml @@ -0,0 +1,140 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: vmoperatordependencies.vcsim.infrastructure.cluster.x-k8s.io +spec: + group: vcsim.infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: VMOperatorDependencies + listKind: VMOperatorDependenciesList + plural: vmoperatordependencies + singular: vmoperatordependencies + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: VMOperatorDependencies is 
the schema for a VM operator dependencies. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: VMOperatorDependenciesSpec defines the desired state of the + VMOperatorDependencies in the namespace where this object is created. + properties: + operatorRef: + description: OperatorRef provides a reference to the running instance + of vm-operator. + properties: + namespace: + description: Namespace where the vm-operator is running. + type: string + type: object + storageClasses: + description: StorageClasses defines a list of StorageClasses to be + bound to the namespace where this object is created. + items: + type: string + type: array + vCenter: + description: VCenter defines info about the vCenter instance that + the vm-operator interacts with. Only one between this field and + VCenterSimulatorRef must be set. 
+ properties: + cluster: + type: string + contentLibrary: + properties: + datastore: + type: string + items: + items: + properties: + datacenter: + type: string + files: + items: + properties: + content: + format: byte + type: string + name: + type: string + type: object + type: array + itemType: + type: string + osInfo: + type: string + productInfo: + type: string + type: object + type: array + name: + type: string + type: object + datacenter: + description: supervisor is based on a single vCenter cluster + type: string + folder: + type: string + password: + type: string + resourcePool: + type: string + serverURL: + type: string + storagePolicy: + type: string + thumbprint: + type: string + username: + type: string + type: object + vCenterSimulatorRef: + description: VCenterSimulatorRef defines info about the vCenter simulator + instance that the vm-operator interacts with. Only one between this + field and VCenter must be set. + properties: + name: + description: Name of the referenced object. + type: string + namespace: + description: Namespace of the referenced object. If empty, it + defaults to the namespace of the parent object. + type: string + type: object + virtualMachineClasses: + description: VirtualMachineClasses defines a list of VirtualMachineClasses + to be bound to the namespace where this object is created. + items: + type: string + type: array + type: object + status: + description: VMOperatorDependenciesStatus defines the observed state of + the VMOperatorDependencies. 
+ properties: + ready: + type: boolean + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/test/infrastructure/vcsim/config/crd/kustomization.yaml b/test/infrastructure/vcsim/config/crd/kustomization.yaml index 7523b46d0a..0f9667c1af 100644 --- a/test/infrastructure/vcsim/config/crd/kustomization.yaml +++ b/test/infrastructure/vcsim/config/crd/kustomization.yaml @@ -10,6 +10,7 @@ resources: - bases/vcsim.infrastructure.cluster.x-k8s.io_vcentersimulators.yaml - bases/vcsim.infrastructure.cluster.x-k8s.io_controlplaneendpoints.yaml - bases/vcsim.infrastructure.cluster.x-k8s.io_envvars.yaml + - bases/vcsim.infrastructure.cluster.x-k8s.io_vmoperatordependencies.yaml patchesStrategicMerge: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. @@ -20,6 +21,7 @@ patchesStrategicMerge: - patches/cainjection_in_vcentersimulators.yaml - patches/cainjection_in_controlplaneendpoints.yaml - patches/cainjection_in_envvars.yaml + - patches/cainjection_in_vmoperatordependencies.yaml # the following config is for teaching kustomize how to do kustomization for CRDs. configurations: diff --git a/test/infrastructure/vcsim/config/crd/patches/cainjection_in_vmoperatordependencies.yaml b/test/infrastructure/vcsim/config/crd/patches/cainjection_in_vmoperatordependencies.yaml new file mode 100644 index 0000000000..dfc38db61c --- /dev/null +++ b/test/infrastructure/vcsim/config/crd/patches/cainjection_in_vmoperatordependencies.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: vmoperatordependencies.vcsim.infrastructure.cluster.x-k8s.io diff --git a/test/infrastructure/vcsim/config/default/kustomization.yaml b/test/infrastructure/vcsim/config/default/kustomization.yaml index be5c023f2e..bb308b26ef 100644 --- a/test/infrastructure/vcsim/config/default/kustomization.yaml +++ b/test/infrastructure/vcsim/config/default/kustomization.yaml @@ -1,11 +1,11 @@ -namespace: capvsim-system +namespace: vcsim-system -namePrefix: capvsim- +namePrefix: vcsim- commonLabels: # capvsim is not a provider, but by adding this label - # we can get this installed by Cluster APIs Tiltfile. - cluster.x-k8s.io/provider: "infrastructure-vsphere-simulator" + # we can get this installed by Cluster APIs Tiltfile and by the clusterctl machinery we use in E2E tests. + cluster.x-k8s.io/provider: "runtime-extension-vcsim" resources: - namespace.yaml diff --git a/test/infrastructure/vcsim/config/rbac/role.yaml b/test/infrastructure/vcsim/config/rbac/role.yaml index d574c2ad2d..587b3ad95d 100644 --- a/test/infrastructure/vcsim/config/rbac/role.yaml +++ b/test/infrastructure/vcsim/config/rbac/role.yaml @@ -99,6 +99,7 @@ rules: - create - get - list + - update - watch - apiGroups: - vcsim.infrastructure.cluster.x-k8s.io @@ -151,6 +152,23 @@ rules: - get - patch - update +- apiGroups: + - vcsim.infrastructure.cluster.x-k8s.io + resources: + - vmoperatordependencies + verbs: + - get + - list + - patch + - watch +- apiGroups: + - vcsim.infrastructure.cluster.x-k8s.io + resources: + - vmoperatordependencies/status + verbs: + - get + - patch + - update - apiGroups: - vmoperator.vmware.com resources: diff --git a/test/infrastructure/vcsim/controllers/envvar_controller.go b/test/infrastructure/vcsim/controllers/envvar_controller.go index 8085480aa6..cf6d2a8a0f 100644 --- 
a/test/infrastructure/vcsim/controllers/envvar_controller.go +++ b/test/infrastructure/vcsim/controllers/envvar_controller.go @@ -20,12 +20,16 @@ import ( "context" "crypto/rand" "crypto/rsa" + "fmt" + "net" "strconv" "sync" "github.com/pkg/errors" "golang.org/x/crypto/ssh" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/klog/v2" "k8s.io/utils/ptr" @@ -35,6 +39,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" + vcsimhelpers "sigs.k8s.io/cluster-api-provider-vsphere/internal/test/helpers/vcsim" vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" ) @@ -64,31 +69,43 @@ func (r *EnvVarReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ c } return ctrl.Result{}, err } + if envVar.Spec.Cluster.Namespace == "" { + envVar.Spec.Cluster.Namespace = envVar.Namespace + } // Fetch the VCenterSimulator instance - if envVar.Spec.VCenterSimulator == "" { - return ctrl.Result{}, errors.New("Spec.VCenter cannot be empty") - } + var vCenterSimulator *vcsimv1.VCenterSimulator + if envVar.Spec.VCenterSimulator != nil { + if envVar.Spec.VCenterSimulator.Name == "" { + return ctrl.Result{}, errors.New("Spec.VCenterSimulator.Name cannot be empty") + } + if envVar.Spec.VCenterSimulator.Namespace == "" { + envVar.Spec.VCenterSimulator.Namespace = envVar.Namespace + } - vCenterSimulator := &vcsimv1.VCenterSimulator{} - if err := r.Client.Get(ctx, client.ObjectKey{ - Namespace: envVar.Namespace, - Name: envVar.Spec.VCenterSimulator, - }, vCenterSimulator); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to get VCenter") + vCenterSimulator = &vcsimv1.VCenterSimulator{} + if err := r.Client.Get(ctx, client.ObjectKey{ + Namespace: envVar.Spec.VCenterSimulator.Namespace, + Name: envVar.Spec.VCenterSimulator.Name, + }, vCenterSimulator); err != nil 
{ + return ctrl.Result{}, errors.Wrapf(err, "failed to get VCenter") + } + log = log.WithValues("VCenter", klog.KObj(vCenterSimulator)) + ctx = ctrl.LoggerInto(ctx, log) } - log = log.WithValues("VCenter", klog.KObj(vCenterSimulator)) - ctx = ctrl.LoggerInto(ctx, log) // Fetch the ControlPlaneEndpoint instance - if envVar.Spec.Cluster.Name == "" { - return ctrl.Result{}, errors.New("Spec.Cluster.Name cannot be empty") + if envVar.Spec.ControlPlaneEndpoint.Name == "" { + return ctrl.Result{}, errors.New("Spec.ControlPlaneEndpoint.Name cannot be empty") + } + if envVar.Spec.ControlPlaneEndpoint.Namespace == "" { + envVar.Spec.ControlPlaneEndpoint.Namespace = envVar.Namespace } controlPlaneEndpoint := &vcsimv1.ControlPlaneEndpoint{} if err := r.Client.Get(ctx, client.ObjectKey{ - Namespace: envVar.Namespace, - Name: envVar.Spec.Cluster.Name, + Namespace: envVar.Spec.ControlPlaneEndpoint.Namespace, + Name: envVar.Spec.ControlPlaneEndpoint.Name, }, controlPlaneEndpoint); err != nil { return ctrl.Result{}, errors.Wrapf(err, "failed to get ControlPlaneEndpoint") } @@ -124,7 +141,7 @@ func (r *EnvVarReconciler) reconcileNormal(ctx context.Context, envVar *vcsimv1. if controlPlaneEndpoint.Status.Host == "" { return ctrl.Result{Requeue: true}, nil } - if vCenterSimulator.Status.Host == "" { + if vCenterSimulator != nil && vCenterSimulator.Status.Host == "" { return ctrl.Result{Requeue: true}, nil } @@ -135,7 +152,7 @@ func (r *EnvVarReconciler) reconcileNormal(ctx context.Context, envVar *vcsimv1. r.sshKeys = map[string]string{} } - key := klog.KObj(vCenterSimulator).String() + key := klog.KObj(envVar).String() sshKey, ok := r.sshKeys[key] if !ok { bitSize := 4096 @@ -161,7 +178,7 @@ func (r *EnvVarReconciler) reconcileNormal(ctx context.Context, envVar *vcsimv1. "VSPHERE_SSH_AUTHORIZED_KEY": sshKey, // other variables required by the cluster template. 
- "NAMESPACE": vCenterSimulator.Namespace, + "NAMESPACE": envVar.Spec.Cluster.Namespace, "CLUSTER_NAME": envVar.Spec.Cluster.Name, "KUBERNETES_VERSION": ptr.Deref(envVar.Spec.Cluster.KubernetesVersion, "v1.28.0"), "CONTROL_PLANE_MACHINE_COUNT": strconv.Itoa(int(ptr.Deref(envVar.Spec.Cluster.ControlPlaneMachines, 1))), @@ -175,34 +192,132 @@ func (r *EnvVarReconciler) reconcileNormal(ctx context.Context, envVar *vcsimv1. // Variables below are generated using the same utilities used both also for E2E tests setup. if r.SupervisorMode { // variables for supervisor mode derived from the vCenterSimulator - for k, v := range vCenterSimulator.SupervisorVariables() { + for k, v := range vCenterSimulatorCommonVariables(vCenterSimulator) { envVar.Status.Variables[k] = v } // Variables for supervisor mode derived from how do we setup dependency for vm-operator - for k, v := range dependenciesForVCenterSimulator(vCenterSimulator).Variables() { + // NOTE: if the VMOperatorDependencies to use is not specified, we use a default dependenciesConfig that works for vcsim. 
+ dependenciesConfig := &vcsimv1.VMOperatorDependencies{ObjectMeta: metav1.ObjectMeta{Namespace: corev1.NamespaceDefault}} + dependenciesConfig.SetVCenterFromVCenterSimulator(vCenterSimulator) + + if envVar.Spec.VMOperatorDependencies != nil { + if err := r.Client.Get(ctx, client.ObjectKey{ + Namespace: envVar.Spec.VMOperatorDependencies.Namespace, + Name: envVar.Spec.VMOperatorDependencies.Name, + }, dependenciesConfig); err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to get VMOperatorDependencies") + } + } + + for k, v := range vmOperatorDependenciesSupervisorVariables(dependenciesConfig) { envVar.Status.Variables[k] = v } // variables for supervisor mode derived from envVar.Spec.Cluster - for k, v := range envVar.Spec.Cluster.SupervisorVariables() { + for k, v := range clusterEnvVarSpecSupervisorVariables(&envVar.Spec.Cluster) { envVar.Status.Variables[k] = v } return ctrl.Result{}, nil } // variables for govmomi mode derived from the vCenterSimulator - for k, v := range vCenterSimulator.GovmomiVariables() { + for k, v := range vCenterSimulatorCommonVariables(vCenterSimulator) { envVar.Status.Variables[k] = v } // variables for govmomi mode derived from envVar.Spec.Cluster - for k, v := range envVar.Spec.Cluster.GovmomiVariables() { + for k, v := range clusterEnvVarSpecGovmomiVariables(&envVar.Spec.Cluster) { envVar.Status.Variables[k] = v } return ctrl.Result{}, nil } +// vCenterSimulatorSupervisorVariables returns name/value pairs for a VCenterSimulator to be used for clusterctl templates when testing both in supervisor and govmomi mode. 
+func vCenterSimulatorCommonVariables(v *vcsimv1.VCenterSimulator) map[string]string { + if v == nil { + return nil + } + host := v.Status.Host + + // NOTE: best effort reverting back to local host because the assumption is that the vcsim controller pod will be port-forwarded on local host + _, port, err := net.SplitHostPort(host) + if err == nil { + host = net.JoinHostPort("127.0.0.1", port) + } + + return map[string]string{ + "VSPHERE_SERVER": fmt.Sprintf("https://%s", v.Status.Host), + "VSPHERE_USERNAME": v.Status.Username, + "VSPHERE_PASSWORD": v.Status.Password, + "VSPHERE_TLS_THUMBPRINT": v.Status.Thumbprint, + "VSPHERE_STORAGE_POLICY": vcsimhelpers.DefaultStoragePolicyName, + + // variables to set up govc for working with the vcsim instance. + "GOVC_URL": fmt.Sprintf("https://%s:%s@%s/sdk", v.Status.Username, v.Status.Password, host), + "GOVC_INSECURE": "true", + } +} + +// clusterEnvVarSpecCommonVariables returns name/value pairs for a ClusterEnvVarSpec to be used for clusterctl templates when testing both in supervisor and govmomi mode. +func clusterEnvVarSpecCommonVariables(c *vcsimv1.ClusterEnvVarSpec) map[string]string { + return map[string]string{ + "VSPHERE_POWER_OFF_MODE": ptr.Deref(c.PowerOffMode, "trySoft"), + } +} + +// clusterEnvVarSpecSupervisorVariables returns name/value pairs for a ClusterEnvVarSpec to be used for clusterctl templates when testing supervisor mode. +func clusterEnvVarSpecSupervisorVariables(c *vcsimv1.ClusterEnvVarSpec) map[string]string { + return clusterEnvVarSpecCommonVariables(c) +} + +// clusterEnvVarSpecGovmomiVariables returns name/value pairs for a ClusterEnvVarSpec to be used for clusterctl templates when testing govmomi mode. 
+func clusterEnvVarSpecGovmomiVariables(c *vcsimv1.ClusterEnvVarSpec) map[string]string { + vars := clusterEnvVarSpecCommonVariables(c) + + datacenter := int(ptr.Deref(c.Datacenter, 0)) + datastore := int(ptr.Deref(c.Datastore, 0)) + cluster := int(ptr.Deref(c.Cluster, 0)) + + // Pick the template for the given Kubernetes version if any, otherwise the template for the latest + // version defined in the model. + template := vcsimhelpers.DefaultVMTemplates[len(vcsimhelpers.DefaultVMTemplates)-1] + if c.KubernetesVersion != nil { + template = fmt.Sprintf("ubuntu-2204-kube-%s", *c.KubernetesVersion) + } + + // NOTE: omitting cluster Name intentionally because E2E tests provide this value in other ways + vars["VSPHERE_DATACENTER"] = vcsimhelpers.DatacenterName(datacenter) + vars["VSPHERE_DATASTORE"] = vcsimhelpers.DatastoreName(datastore) + vars["VSPHERE_FOLDER"] = vcsimhelpers.VMFolderName(datacenter) + vars["VSPHERE_NETWORK"] = vcsimhelpers.NetworkPath(datacenter, vcsimhelpers.DefaultNetworkName) + vars["VSPHERE_RESOURCE_POOL"] = vcsimhelpers.ResourcePoolPath(datacenter, cluster) + vars["VSPHERE_TEMPLATE"] = vcsimhelpers.VMPath(datacenter, template) + return vars +} + +// vmOperatorDependenciesSupervisorVariables returns name/value pairs for a VCenterSimulator to be used for VMOperatorDependencies templates when testing supervisor mode. +// NOTE: +// - the system automatically picks the first StorageClass defined in the VMOperatorDependencies. +// - the system automatically picks the first VirtualMachine class defined in the VMOperatorDependencies. +// - the system automatically picks the first Image from the content library defined in the VMOperatorDependencies. 
+func vmOperatorDependenciesSupervisorVariables(d *vcsimv1.VMOperatorDependencies) map[string]string { + vars := map[string]string{} + if d.Spec.VCenter != nil { + vars["VSPHERE_STORAGE_POLICY"] = d.Spec.VCenter.StoragePolicy + } + if len(d.Spec.StorageClasses) > 0 { + vars["VSPHERE_STORAGE_CLASS"] = d.Spec.StorageClasses[0] + } + if len(d.Spec.VirtualMachineClasses) > 0 { + vars["VSPHERE_MACHINE_CLASS_NAME"] = d.Spec.VirtualMachineClasses[0] + } + if len(d.Spec.VCenter.ContentLibrary.Items) > 0 { + vars["VSPHERE_IMAGE_NAME"] = d.Spec.VCenter.ContentLibrary.Items[0].Name + } + return vars +} + func (r *EnvVarReconciler) reconcileDelete(_ context.Context, _ *vcsimv1.EnvVar, _ *vcsimv1.VCenterSimulator, _ *vcsimv1.ControlPlaneEndpoint) (ctrl.Result, error) { return ctrl.Result{}, nil } diff --git a/test/infrastructure/vcsim/controllers/vcsim_controller.go b/test/infrastructure/vcsim/controllers/vcsim_controller.go index db3ed7a9e5..5f5e3b0209 100644 --- a/test/infrastructure/vcsim/controllers/vcsim_controller.go +++ b/test/infrastructure/vcsim/controllers/vcsim_controller.go @@ -50,7 +50,6 @@ import ( vcsimhelpers "sigs.k8s.io/cluster-api-provider-vsphere/internal/test/helpers/vcsim" "sigs.k8s.io/cluster-api-provider-vsphere/test/framework/vmoperator" vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" - "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/controllers/images" ) const ( @@ -79,7 +78,7 @@ type VCenterSimulatorReconciler struct { // +kubebuilder:rbac:groups=vcsim.infrastructure.cluster.x-k8s.io,resources=vcentersimulators,verbs=get;list;watch;patch // +kubebuilder:rbac:groups=vcsim.infrastructure.cluster.x-k8s.io,resources=vcentersimulators/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=topology.tanzu.vmware.com,resources=availabilityzones,verbs=get;list;watch;create +// +kubebuilder:rbac:groups=topology.tanzu.vmware.com,resources=availabilityzones,verbs=get;list;watch;create;update // 
+kubebuilder:rbac:groups=vmoperator.vmware.com,resources=virtualmachineclasses,verbs=get;list;watch;create // +kubebuilder:rbac:groups=vmoperator.vmware.com,resources=virtualmachineclassbindings,verbs=get;list;watch;create // +kubebuilder:rbac:groups=vmoperator.vmware.com,resources=contentlibraryproviders,verbs=get;list;watch;create @@ -222,11 +221,14 @@ func (r *VCenterSimulatorReconciler) reconcileNormal(ctx context.Context, vCente // - A set of objects/configurations in the vCenterSimulator cluster the vm-operator is pointing to // - A set of Kubernetes object the vm-operator relies on - // To mimic the supervisor cluster, there will be only one vm-operator instance for each management cluster; - // also, the logic below should consider that the instance of the vm-operator is bound to a specific vCenterSimulator cluster. - config := dependenciesForVCenterSimulator(vCenterSimulator) + // Automatically configure the default namespace with dependencies required by the vm-operator to work with vcsim. + // if order to reconcile VirtualMachine objects (this simplifies setup when working the Tilt use case). + // NOTE: if necessary to reconcile VirtualMachine objects in different namespaces, it is required to + // manually create VMOperatorDependencies objects, one for each namespace. + dependenciesConfig := &vcsimv1.VMOperatorDependencies{ObjectMeta: metav1.ObjectMeta{Namespace: corev1.NamespaceDefault}} + dependenciesConfig.SetVCenterFromVCenterSimulator(vCenterSimulator) - if err := vmoperator.ReconcileDependencies(ctx, r.Client, config); err != nil { + if err := vmoperator.ReconcileDependencies(ctx, r.Client, dependenciesConfig); err != nil { return err } @@ -235,7 +237,7 @@ func (r *VCenterSimulatorReconciler) reconcileNormal(ctx context.Context, vCente // In order to make things to work in vcsim, there is the vmIP reconciler, which requires // some info about the vcsim instance; in order to do so, we are creating a Secret. 
- if err := addPreRequisitesForVMIPreconciler(ctx, r.Client, config); err != nil { + if err := addPreRequisitesForVMIPReconciler(ctx, r.Client, dependenciesConfig); err != nil { return err } } @@ -274,76 +276,26 @@ func createVMTemplates(ctx context.Context, vCenterSimulator *vcsimv1.VCenterSim return nil } -// dependenciesForVCenterSimulator return a dependency config for a vCenterSimulator. -// Note: This config uses cluster DC0/C0, datastore LocalDS_0 in vcsim; it also sets up content library -// and the default namespace (the namespace where workload cluster are going to be deployed) with just -// what is required to reconcile VirtualMachines with the vm-operator. -func dependenciesForVCenterSimulator(vCenterSimulator *vcsimv1.VCenterSimulator) *vmoperator.Dependencies { - datacenter := 0 - cluster := 0 - datastore := 0 - - config := &vmoperator.Dependencies{ - // This is where tilt deploys the vm-operator - Namespace: vmoperator.DefaultNamespace, - - VCenterCluster: vmoperator.VCenterClusterConfig{ - ServerURL: vCenterSimulator.Status.Host, - Username: vCenterSimulator.Status.Username, - Password: vCenterSimulator.Status.Password, - Thumbprint: vCenterSimulator.Status.Thumbprint, - Datacenter: vcsimhelpers.DatacenterName(datacenter), - Cluster: vcsimhelpers.ClusterPath(datacenter, cluster), - Folder: vcsimhelpers.VMFolderName(datacenter), - ResourcePool: vcsimhelpers.ResourcePoolPath(datacenter, cluster), - StoragePolicy: vcsimhelpers.DefaultStoragePolicyName, - - // Those are settings for a fake content library we are going to create given that it doesn't exists in vcsim by default. - // It contains a single dummy image. 
- ContentLibrary: vmoperator.ContentLibraryConfig{ - Name: "vcsim", - Datastore: vcsimhelpers.DatastorePath(datacenter, datastore), - Item: vmoperator.ContentLibraryItemConfig{ - Name: "vcsim-default-image", - Files: []vmoperator.ContentLibraryItemFilesConfig{ // TODO: check if we really need both - { - Name: "ttylinux-pc_i486-16.1.ovf", - Content: images.SampleOVF, - }, - }, - ItemType: "ovf", - ProductInfo: "dummy-productInfo", - OSInfo: "dummy-OSInfo", - }, - }, - }, - - // The users are expected to store Cluster API clusters to be managed by the vm-operator - // in the default namespace and to use the "vcsim-default" storage class. - UserNamespace: vmoperator.UserNamespaceConfig{ - Name: corev1.NamespaceDefault, - StorageClass: "vcsim-default-storage-class", - VirtualMachineClass: "vcsim-default-vm-class", - }, - } - return config -} - -func addPreRequisitesForVMIPreconciler(ctx context.Context, c client.Client, config *vmoperator.Dependencies) error { +func addPreRequisitesForVMIPReconciler(ctx context.Context, c client.Client, config *vcsimv1.VMOperatorDependencies) error { log := ctrl.LoggerFrom(ctx) log.Info("Reconciling requirements for the Fake net-operator Deployment") + // default the OperatorRef if not specified. 
+ if config.Spec.OperatorRef == nil { + config.Spec.OperatorRef = &vcsimv1.VMOperatorRef{Namespace: vmoperator.DefaultNamespace} + } + netOperatorSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: netConfigMapName, - Namespace: config.Namespace, + Namespace: config.Spec.OperatorRef.Namespace, }, StringData: map[string]string{ - netConfigServerURLKey: config.VCenterCluster.ServerURL, - netConfigDatacenterKey: config.VCenterCluster.Datacenter, - netConfigUsernameKey: config.VCenterCluster.Username, - netConfigPasswordKey: config.VCenterCluster.Password, - netConfigThumbprintKey: config.VCenterCluster.Thumbprint, + netConfigServerURLKey: config.Spec.VCenter.ServerURL, + netConfigDatacenterKey: config.Spec.VCenter.Datacenter, + netConfigUsernameKey: config.Spec.VCenter.Username, + netConfigPasswordKey: config.Spec.VCenter.Password, + netConfigThumbprintKey: config.Spec.VCenter.Thumbprint, }, Type: corev1.SecretTypeOpaque, } diff --git a/test/infrastructure/vcsim/controllers/vmoperatordependencies_controller.go b/test/infrastructure/vcsim/controllers/vmoperatordependencies_controller.go new file mode 100644 index 0000000000..5614a94586 --- /dev/null +++ b/test/infrastructure/vcsim/controllers/vmoperatordependencies_controller.go @@ -0,0 +1,108 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/predicates" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + + "sigs.k8s.io/cluster-api-provider-vsphere/test/framework/vmoperator" + vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" +) + +type VMOperatorDependenciesReconciler struct { + Client client.Client + + // WatchFilterValue is the label value used to filter events prior to reconciliation. + WatchFilterValue string +} + +// +kubebuilder:rbac:groups=vcsim.infrastructure.cluster.x-k8s.io,resources=vmoperatordependencies,verbs=get;list;watch;patch +// +kubebuilder:rbac:groups=vcsim.infrastructure.cluster.x-k8s.io,resources=vmoperatordependencies/status,verbs=get;update;patch + +func (r *VMOperatorDependenciesReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { + // Fetch the VMOperatorDependencies instance + vmOperatorDependencies := &vcsimv1.VMOperatorDependencies{} + if err := r.Client.Get(ctx, req.NamespacedName, vmOperatorDependencies); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + + // Initialize the patch helper + patchHelper, err := patch.NewHelper(vmOperatorDependencies, r.Client) + if err != nil { + return ctrl.Result{}, err + } + + // Always attempt to Patch the VMOperatorDependencies object and status after each reconciliation. 
+ defer func() { + if err := patchHelper.Patch(ctx, vmOperatorDependencies); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, err}) + } + }() + + // Handle deleted VMOperatorDependencies + if !vmOperatorDependencies.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, vmOperatorDependencies) + } + + // Handle non-deleted VMOperatorDependencies + return ctrl.Result{}, r.reconcileNormal(ctx, vmOperatorDependencies) +} + +func (r *VMOperatorDependenciesReconciler) reconcileNormal(ctx context.Context, vmOperatorDependencies *vcsimv1.VMOperatorDependencies) error { + log := ctrl.LoggerFrom(ctx) + log.Info("Reconciling VCSim VMOperatorDependencies") + + err := vmoperator.ReconcileDependencies(ctx, r.Client, vmOperatorDependencies) + if err != nil { + vmOperatorDependencies.Status.Ready = false + return err + } + + vmOperatorDependencies.Status.Ready = true + return nil +} + +func (r *VMOperatorDependenciesReconciler) reconcileDelete(_ context.Context, _ *vcsimv1.VMOperatorDependencies) (ctrl.Result, error) { + // TODO: cleanup dependencies + return ctrl.Result{}, nil +} + +// SetupWithManager will add watches for this controller. +func (r *VMOperatorDependenciesReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { + err := ctrl.NewControllerManagedBy(mgr). + For(&vcsimv1.VMOperatorDependencies{}). + WithOptions(options). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). + Complete(r) + + if err != nil { + return errors.Wrap(err, "failed setting up with a controller manager") + } + return nil +} diff --git a/test/infrastructure/vcsim/main.go b/test/infrastructure/vcsim/main.go index 612277b1dd..6f4b56421e 100644 --- a/test/infrastructure/vcsim/main.go +++ b/test/infrastructure/vcsim/main.go @@ -84,10 +84,12 @@ var ( diagnosticsOptions = flags.DiagnosticsOptions{} logOptions = logs.NewOptions() // vcsim specific flags. 
- vmConcurrency int - vCenterConcurrency int - fakeAPIServerConcurrency int - envsubstConcurrency int + vSphereVMConcurrency int + virtualMachineConcurrency int + vCenterSimulatorConcurrency int + controlPlaneEndpointConcurrency int + envsubstConcurrency int + vmOperatorDependenciesConcurrency int // vsphere session specific flags. enableKeepAlive bool keepAliveDuration time.Duration @@ -139,17 +141,23 @@ func InitFlags(fs *pflag.FlagSet) { fs.BoolVar(&enableContentionProfiling, "contention-profiling", false, "Enable block profiling") - fs.IntVar(&vmConcurrency, "vm-concurrency", 10, - "Number of vsphere VM to process simultaneously") + fs.IntVar(&vSphereVMConcurrency, "vsphere-vm-concurrency", 10, + "Number of VSphereVM to process simultaneously") - fs.IntVar(&vCenterConcurrency, "vcenter-concurrency", 10, - "Number of vcenter server to process simultaneously") + fs.IntVar(&virtualMachineConcurrency, "virtual-machine-concurrency", 10, + "Number of VirtualMachine to process simultaneously") - fs.IntVar(&fakeAPIServerConcurrency, "fake-apiserver-endpoint-concurrency", 10, - "Number of vcsim control plane endpoint to process simultaneously") + fs.IntVar(&vCenterSimulatorConcurrency, "vcenter-simulator-concurrency", 10, + "Number of VCenterSimulator to process simultaneously") + + fs.IntVar(&controlPlaneEndpointConcurrency, "controlplane-endpoint-concurrency", 10, + "Number of ControlPlaneEndpoint to process simultaneously") fs.IntVar(&envsubstConcurrency, "envsubst-concurrency", 10, - "Number of envsubst to process simultaneously") + "Number of Envsubst to process simultaneously") + + fs.IntVar(&vmOperatorDependenciesConcurrency, "vm-operator-dependencies-concurrency", 10, + "Number of VMOperatorDependencies to process simultaneously") fs.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, "The minimum interval at which watched resources are reconciled (e.g. 
15m)") @@ -327,7 +335,7 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager, supervisorMode bool Client: mgr.GetClient(), SupervisorMode: supervisorMode, WatchFilterValue: watchFilterValue, - }).SetupWithManager(ctx, mgr, concurrency(vCenterConcurrency)); err != nil { + }).SetupWithManager(ctx, mgr, concurrency(vCenterSimulatorConcurrency)); err != nil { setupLog.Error(err, "unable to create controller", "controller", "VCenterSimulatorReconciler") os.Exit(1) } @@ -338,7 +346,7 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager, supervisorMode bool APIServerMux: apiServerMux, PodIP: podIP, WatchFilterValue: watchFilterValue, - }).SetupWithManager(ctx, mgr, concurrency(fakeAPIServerConcurrency)); err != nil { + }).SetupWithManager(ctx, mgr, concurrency(controlPlaneEndpointConcurrency)); err != nil { setupLog.Error(err, "unable to create controller", "controller", "ControlPlaneEndpointReconciler") os.Exit(1) } @@ -351,10 +359,18 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager, supervisorMode bool EnableKeepAlive: enableKeepAlive, KeepAliveDuration: keepAliveDuration, WatchFilterValue: watchFilterValue, - }).SetupWithManager(ctx, mgr, concurrency(vmConcurrency)); err != nil { + }).SetupWithManager(ctx, mgr, concurrency(virtualMachineConcurrency)); err != nil { setupLog.Error(err, "unable to create controller", "controller", "VirtualMachineReconciler") os.Exit(1) } + + if err := (&controllers.VMOperatorDependenciesReconciler{ + Client: mgr.GetClient(), + WatchFilterValue: watchFilterValue, + }).SetupWithManager(ctx, mgr, concurrency(vmOperatorDependenciesConcurrency)); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "VMOperatorDependenciesReconciler") + os.Exit(1) + } } else { if err := (&controllers.VSphereVMReconciler{ Client: mgr.GetClient(), @@ -363,7 +379,7 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager, supervisorMode bool EnableKeepAlive: enableKeepAlive, KeepAliveDuration: 
keepAliveDuration, WatchFilterValue: watchFilterValue, - }).SetupWithManager(ctx, mgr, concurrency(vmConcurrency)); err != nil { + }).SetupWithManager(ctx, mgr, concurrency(vSphereVMConcurrency)); err != nil { setupLog.Error(err, "unable to create controller", "controller", "VSphereVMReconciler") os.Exit(1) } diff --git a/test/infrastructure/vcsim/scripts/vcsim.sh b/test/infrastructure/vcsim/scripts/vcsim.sh index fc8b787fec..9e92dbb0e4 100755 --- a/test/infrastructure/vcsim/scripts/vcsim.sh +++ b/test/infrastructure/vcsim/scripts/vcsim.sh @@ -61,13 +61,16 @@ fi if eval "kubectl get envvar $CLUSTER_NAME &> /dev/null"; then echo "using existing EnvVar $CLUSTER_NAME" else - kubectl apply -f - &> /dev/null <