diff --git a/doc/datavolumes.md b/doc/datavolumes.md index f543c430e1..793a4831b3 100644 --- a/doc/datavolumes.md +++ b/doc/datavolumes.md @@ -349,6 +349,13 @@ spec: [Get VDDK ConfigMap example](../manifests/example/vddk-configmap.yaml) [Ways to find thumbprint](https://libguestfs.org/nbdkit-vddk-plugin.1.html#THUMBPRINTS) +#### Extra VDDK Configuration Options + +The VDDK library itself looks in a configuration file (such as `/etc/vmware/config`) for extra options to fine tune data transfers. To pass these options through to the VDDK, store the configuration file contents in a ConfigMap with the key `vddk-config-file` and add a `cdi.kubevirt.io/storage.pod.vddk.extraargs` annotation to the DataVolume specification. The ConfigMap will be mounted to the importer pod as a volume, and the mount directory will have a file named `vddk-config-file` with the contents of the file. This means that the ConfigMap must be placed in the same namespace as the DataVolume, and the ConfigMap should only have one file entry, `vddk-config-file`. + +[Example annotation](../manifests/example/vddk-args-annotation.yaml) +[Example ConfigMap](../manifests/example/vddk-args-configmap.yaml) + ## Multi-stage Import In a multi-stage import, multiple pods are started in succession to copy different parts of the source to an existing base disk image. Currently only the [ImageIO](#multi-stage-imageio-import) and [VDDK](#multi-stage-vddk-import) data sources support multi-stage imports. 
diff --git a/manifests/example/vddk-args-annotation.yaml b/manifests/example/vddk-args-annotation.yaml new file mode 100644 index 0000000000..fcada06358 --- /dev/null +++ b/manifests/example/vddk-args-annotation.yaml @@ -0,0 +1,22 @@ +apiVersion: cdi.kubevirt.io/v1beta1 +kind: DataVolume +metadata: + name: "vddk-dv" + namespace: "cdi" + annotations: + cdi.kubevirt.io/storage.pod.vddk.extraargs: vddk-arguments +spec: + source: + vddk: + backingFile: "[iSCSI_Datastore] vm/vm_1.vmdk" # From 'Hard disk'/'Disk File' in vCenter/ESX VM settings + url: "https://vcenter.corp.com" + uuid: "52260566-b032-36cb-55b1-79bf29e30490" + thumbprint: "20:6C:8A:5D:44:40:B3:79:4B:28:EA:76:13:60:90:6E:49:D9:D9:A3" # SSL fingerprint of vCenter/ESX host + secretRef: "vddk-credentials" + initImageURL: "registry:5000/vddk-init:latest" + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "32Gi" \ No newline at end of file diff --git a/manifests/example/vddk-args-configmap.yaml b/manifests/example/vddk-args-configmap.yaml new file mode 100644 index 0000000000..278918969e --- /dev/null +++ b/manifests/example/vddk-args-configmap.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: cdi + name: vddk-arguments +data: + vddk-config-file: |- + VixDiskLib.nfcAio.Session.BufSizeIn64KB=16 + VixDiskLib.nfcAio.Session.BufCount=4 \ No newline at end of file diff --git a/pkg/common/common.go b/pkg/common/common.go index 3e948fca63..be77b264d0 100644 --- a/pkg/common/common.go +++ b/pkg/common/common.go @@ -254,6 +254,12 @@ const ( VddkConfigDataKey = "vddk-init-image" // AwaitingVDDK is a Pending condition reason that indicates the PVC is waiting for a VDDK image AwaitingVDDK = "AwaitingVDDK" + // VddkArgsDir is the path to the volume mount containing extra VDDK arguments + VddkArgsDir = "/vddk-args" + // VddkArgsVolName is the name of the volume referencing the extra VDDK arguments ConfigMap + VddkArgsVolName = "vddk-extra-args" + // VddkArgsKeyName is 
the name of the key that must be present in the VDDK arguments ConfigMap + VddkArgsKeyName = "vddk-config-file" // UploadContentTypeHeader is the header upload clients may use to set the content type explicitly UploadContentTypeHeader = "x-cdi-content-type" diff --git a/pkg/controller/common/util.go b/pkg/controller/common/util.go index 8b383a4a34..9aa50ed770 100644 --- a/pkg/controller/common/util.go +++ b/pkg/controller/common/util.go @@ -147,6 +147,8 @@ const ( AnnVddkHostConnection = AnnAPIGroup + "/storage.pod.vddk.host" // AnnVddkInitImageURL saves a per-DV VDDK image URL on the PVC AnnVddkInitImageURL = AnnAPIGroup + "/storage.pod.vddk.initimageurl" + // AnnVddkExtraArgs references a ConfigMap that holds arguments to pass directly to the VDDK library + AnnVddkExtraArgs = AnnAPIGroup + "/storage.pod.vddk.extraargs" // AnnRequiresScratch provides a const for our PVC requiring scratch annotation AnnRequiresScratch = AnnAPIGroup + "/storage.import.requiresScratch" diff --git a/pkg/controller/import-controller.go b/pkg/controller/import-controller.go index 49f1ff898f..225117308d 100644 --- a/pkg/controller/import-controller.go +++ b/pkg/controller/import-controller.go @@ -116,6 +116,7 @@ type importerPodArgs struct { imagePullSecrets []corev1.LocalObjectReference workloadNodePlacement *sdkapi.NodePlacement vddkImageName *string + vddkExtraArgs *string priorityClassName string } @@ -480,6 +481,7 @@ func (r *ImportReconciler) createImporterPod(pvc *corev1.PersistentVolumeClaim) r.log.V(1).Info("Creating importer POD for PVC", "pvc.Name", pvc.Name) var scratchPvcName *string var vddkImageName *string + var vddkExtraArgs *string var err error requiresScratch := r.requiresScratchSpace(pvc) @@ -508,6 +510,11 @@ func (r *ImportReconciler) createImporterPod(pvc *corev1.PersistentVolumeClaim) } return errors.New(message) } + + if extraArgs, ok := anno[cc.AnnVddkExtraArgs]; ok && extraArgs != "" { + r.log.V(1).Info("Mounting extra VDDK args ConfigMap to importer pod", 
"ConfigMap", extraArgs) + vddkExtraArgs = &extraArgs + } } podEnvVar, err := r.createImportEnvVar(pvc) @@ -523,6 +530,7 @@ func (r *ImportReconciler) createImporterPod(pvc *corev1.PersistentVolumeClaim) pvc: pvc, scratchPvcName: scratchPvcName, vddkImageName: vddkImageName, + vddkExtraArgs: vddkExtraArgs, priorityClassName: cc.GetPriorityClass(pvc), } @@ -999,6 +1007,12 @@ func makeImporterContainerSpec(args *importerPodArgs) []corev1.Container { MountPath: "/opt", }) } + if args.vddkExtraArgs != nil { + containers[0].VolumeMounts = append(containers[0].VolumeMounts, corev1.VolumeMount{ + Name: common.VddkArgsVolName, + MountPath: common.VddkArgsDir, + }) + } if args.podEnvVar.certConfigMap != "" { containers[0].VolumeMounts = append(containers[0].VolumeMounts, corev1.VolumeMount{ Name: CertVolName, @@ -1070,6 +1084,18 @@ func makeImporterVolumeSpec(args *importerPodArgs) []corev1.Volume { }, }) } + if args.vddkExtraArgs != nil { + volumes = append(volumes, corev1.Volume{ + Name: common.VddkArgsVolName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &v1.ConfigMapVolumeSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: *args.vddkExtraArgs, + }, + }, + }, + }) + } if args.podEnvVar.certConfigMap != "" { volumes = append(volumes, createConfigMapVolume(CertVolName, args.podEnvVar.certConfigMap)) } diff --git a/pkg/controller/import-controller_test.go b/pkg/controller/import-controller_test.go index 96fb5e14a7..b2118cab62 100644 --- a/pkg/controller/import-controller_test.go +++ b/pkg/controller/import-controller_test.go @@ -935,6 +935,36 @@ var _ = Describe("Create Importer Pod", func() { Entry("with long PVC name", strings.Repeat("test-pvc-", 20), "snap1"), Entry("with long PVC and checkpoint names", strings.Repeat("test-pvc-", 20), strings.Repeat("repeating-checkpoint-id-", 10)), ) + + It("should mount extra VDDK arguments ConfigMap when annotation is set", func() { + pvcName := "testPvc1" + podName := "testpod" + extraArgs := "testing-123" + 
annotations := map[string]string{ + cc.AnnEndpoint: testEndPoint, + cc.AnnImportPod: podName, + cc.AnnSource: cc.SourceVDDK, + cc.AnnVddkInitImageURL: "testing-vddk", + cc.AnnVddkExtraArgs: extraArgs, + } + pvc := cc.CreatePvcInStorageClass(pvcName, "default", &testStorageClass, annotations, nil, corev1.ClaimBound) + reconciler := createImportReconciler(pvc) + + _, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: pvcName, Namespace: "default"}}) + Expect(err).ToNot(HaveOccurred()) + + pod := &corev1.Pod{} + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: podName, Namespace: "default"}, pod) + Expect(err).ToNot(HaveOccurred()) + + found := false // Look for vddk-args mount + for _, volume := range pod.Spec.Volumes { + if volume.ConfigMap != nil && volume.ConfigMap.Name == extraArgs { + found = true + } + } + Expect(found).To(BeTrue()) + }) }) var _ = Describe("Import test env", func() { diff --git a/pkg/image/nbdkit.go b/pkg/image/nbdkit.go index 21688b0e2c..fd5d1eb055 100644 --- a/pkg/image/nbdkit.go +++ b/pkg/image/nbdkit.go @@ -168,6 +168,13 @@ func NewNbdkitVddk(nbdkitPidFile, socket string, args NbdKitVddkPluginArgs) (Nbd pluginArgs = append(pluginArgs, "-D", "nbdkit.backend.datapath=0") pluginArgs = append(pluginArgs, "-D", "vddk.datapath=0") pluginArgs = append(pluginArgs, "-D", "vddk.stats=1") + config, err := getVddkConfig() + if err != nil { + return nil, err + } + if config != "" { + pluginArgs = append(pluginArgs, "config="+config) + } p := getVddkPluginPath() n := &Nbdkit{ NbdPidFile: nbdkitPidFile, @@ -228,6 +235,22 @@ func getVddkPluginPath() NbdkitPlugin { return NbdkitVddkPlugin } +// Extra VDDK configuration options are stored in a ConfigMap mounted to the +// importer pod. Just look for the first file in the mounted directory, and +// pass that through nbdkit via the "config=" option. 
+func getVddkConfig() (string, error) { + path := filepath.Join(common.VddkArgsDir, common.VddkArgsKeyName) + _, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { // No configuration file found, so no extra arguments to give to VDDK + return "", nil + } + return "", err + } + + return path, nil +} + func (n *Nbdkit) getSourceArg(s string) string { var source string switch n.plugin { diff --git a/tests/datavolume_test.go b/tests/datavolume_test.go index 0af8362fde..fae9bce629 100644 --- a/tests/datavolume_test.go +++ b/tests/datavolume_test.go @@ -10,14 +10,17 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "github.com/onsi/gomega/format" "github.com/google/uuid" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + core "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" @@ -3463,6 +3466,70 @@ var _ = Describe("[vendor:cnv-qe@redhat.com][level:component]DataVolume tests", }), ) }) + + Describe("extra configuration options for VDDK imports", func() { + It("[test_id:XXXX]succeed importing VDDK data volume with extra arguments ConfigMap set", Label("VDDK"), func() { + format.MaxLength = 0 + vddkConfigOptions := []string{ + "VixDiskLib.nfcAio.Session.BufSizeIn64KB=16", + "vixDiskLib.nfcAio.Session.BufCount=4", + } + + vddkConfigMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vddk-extras", + }, + Data: map[string]string{ + common.VddkArgsKeyName: strings.Join(vddkConfigOptions, "\n"), + }, + } + + _, err := f.K8sClient.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), vddkConfigMap, metav1.CreateOptions{}) + if !k8serrors.IsAlreadyExists(err) { + Expect(err).ToNot(HaveOccurred()) + } + + vcenterURL := fmt.Sprintf(utils.VcenterURL, 
f.CdiInstallNs) + dataVolume := createVddkDataVolume("import-pod-vddk-config-test", "100Mi", vcenterURL) + + By(fmt.Sprintf("Create new DataVolume %s", dataVolume.Name)) + controller.AddAnnotation(dataVolume, controller.AnnPodRetainAfterCompletion, "true") + controller.AddAnnotation(dataVolume, controller.AnnVddkExtraArgs, "vddk-extras") + dataVolume, err = utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, dataVolume) + Expect(err).ToNot(HaveOccurred()) + + By("Verify PVC was created") + pvc, err := utils.WaitForPVC(f.K8sClient, dataVolume.Namespace, dataVolume.Name) + Expect(err).ToNot(HaveOccurred()) + f.ForceBindIfWaitForFirstConsumer(pvc) + + By("Wait for import to be completed") + err = utils.WaitForDataVolumePhase(f, dataVolume.Namespace, cdiv1.Succeeded, dataVolume.Name) + Expect(err).ToNot(HaveOccurred(), "DataVolume not in phase succeeded in time") + + By("Find importer pods after completion") + pvcName := dataVolume.Name + // When using populators, the PVC Prime name is used to build the importer pod + if usePopulator, _ := dvc.CheckPVCUsingPopulators(pvc); usePopulator { + pvcName = populators.PVCPrimeName(pvc) + } + By("Find importer pod " + pvcName) + importer, err := utils.FindPodByPrefixOnce(f.K8sClient, dataVolume.Namespace, common.ImporterPodName, common.CDILabelSelector) + Expect(err).ToNot(HaveOccurred()) + Expect(importer.DeletionTimestamp).To(BeNil()) + + Eventually(func() (string, error) { + out, err := f.K8sClient.CoreV1(). + Pods(importer.Namespace). + GetLogs(importer.Name, &core.PodLogOptions{SinceTime: &meta.Time{Time: CurrentSpecReport().StartTime}}). 
+ DoRaw(context.Background()) + return string(out), err + }, time.Minute, pollingInterval).Should(And( + ContainSubstring(vddkConfigOptions[0]), + ContainSubstring(vddkConfigOptions[1]), + )) + }) + }) }) func SetFilesystemOverhead(f *framework.Framework, globalOverhead, scOverhead string) { diff --git a/tools/vddk-test/vddk-test-plugin.c b/tools/vddk-test/vddk-test-plugin.c index 2d23ab9e30..c0ccf788f3 100644 --- a/tools/vddk-test/vddk-test-plugin.c +++ b/tools/vddk-test/vddk-test-plugin.c @@ -13,6 +13,7 @@ #define NBDKIT_API_VERSION 2 #include +#include <stdio.h> #include #include #include @@ -32,6 +33,21 @@ int fakevddk_config(const char *key, const char *value) { if (strcmp(key, "snapshot") == 0) { expected_arg_count = 9; // Expect one for 'snapshot' and one for 'transports' } + if (strcmp(key, "config") == 0) { + expected_arg_count = 8; + nbdkit_debug("Extra config option set to: %s\n", value); + + FILE *f = fopen(value, "r"); + if (f == NULL) { + nbdkit_error("Failed to open VDDK extra configuration file %s!\n", value); + return -1; + } + char extras[512]; // Importer test will scan debug log for given values, just pass them back + while (fgets(extras, 512, f) != NULL) { + nbdkit_debug("Extra configuration data: %s\n", extras); + } + fclose(f); + } return 0; }