From 85903b9693cb329bc5119d0ee50aa66ce249aef3 Mon Sep 17 00:00:00 2001
From: Alex Kalenyuk
Date: Wed, 1 May 2024 11:10:41 +0300
Subject: [PATCH] Use scratch space for non-archive uploads (with an exception
 for host-assisted clone from block)

Basically a follow-up to 3219 for upload sources, which suffer from the
same issue of losing sparseness.

Signed-off-by: Alex Kalenyuk
---
 cmd/cdi-cloner/clone-source.go         |  9 +++++----
 pkg/importer/upload-datasource.go      |  4 ++--
 pkg/importer/upload-datasource_test.go | 12 ++++++------
 tests/upload_test.go                   |  4 ----
 4 files changed, 13 insertions(+), 16 deletions(-)

diff --git a/cmd/cdi-cloner/clone-source.go b/cmd/cdi-cloner/clone-source.go
index 6e9d9cb54a..b604f63832 100644
--- a/cmd/cdi-cloner/clone-source.go
+++ b/cmd/cdi-cloner/clone-source.go
@@ -6,6 +6,7 @@ import (
 	"crypto/x509"
 	"errors"
 	"flag"
+	"fmt"
 	"io"
 	"net/http"
 	"os"
@@ -55,7 +56,7 @@ func (er *execReader) Close() error {
 }
 
 func init() {
-	flag.StringVar(&contentType, "content-type", "", "filesystem-clone|blockdevice-clone")
+	flag.StringVar(&contentType, "content-type", "", fmt.Sprintf("%s|%s", common.FilesystemCloneContentType, common.BlockdeviceClone))
 	flag.StringVar(&mountPoint, "mount", "", "pvc mount point")
 	flag.Uint64Var(&uploadBytes, "upload-bytes", 0, "approx number of bytes in input")
 	klog.InitFlags(nil)
@@ -138,7 +139,7 @@ func pipeToSnappy(reader io.ReadCloser) io.ReadCloser {
 
 func validateContentType() {
 	switch contentType {
-	case "filesystem-clone", "blockdevice-clone":
+	case common.FilesystemCloneContentType, common.BlockdeviceClone:
 	default:
 		klog.Fatalf("Invalid content-type %q", contentType)
 	}
@@ -202,13 +203,13 @@ func newTarReader(preallocation bool) (io.ReadCloser, error) {
 
 func getInputStream(preallocation bool) io.ReadCloser {
 	switch contentType {
-	case "filesystem-clone":
+	case common.FilesystemCloneContentType:
 		rc, err := newTarReader(preallocation)
 		if err != nil {
 			klog.Fatalf("Error creating tar reader for %q: %+v", mountPoint, err)
 		}
 		return rc
-	case "blockdevice-clone":
+	case common.BlockdeviceClone:
 		rc, err := os.Open(mountPoint)
 		if err != nil {
 			klog.Fatalf("Error opening block device %q: %+v", mountPoint, err)
diff --git a/pkg/importer/upload-datasource.go b/pkg/importer/upload-datasource.go
index 643aa38b3f..786ea0d973 100644
--- a/pkg/importer/upload-datasource.go
+++ b/pkg/importer/upload-datasource.go
@@ -51,8 +51,8 @@ func (ud *UploadDataSource) Info() (ProcessingPhase, error) {
 	if ud.contentType == cdiv1.DataVolumeArchive {
 		return ProcessingPhaseTransferDataDir, nil
 	}
-	if !ud.readers.Convert {
-		// Uploading a raw file, we can write that directly to the target.
+	if ud.contentType == common.BlockdeviceClone {
+		// The only exception that still writes directly to the target is host-assisted clone from a block device.
 		return ProcessingPhaseTransferDataFile, nil
 	}
 	return ProcessingPhaseTransferScratch, nil
diff --git a/pkg/importer/upload-datasource_test.go b/pkg/importer/upload-datasource_test.go
index bca95801e9..c9859fc9ea 100644
--- a/pkg/importer/upload-datasource_test.go
+++ b/pkg/importer/upload-datasource_test.go
@@ -78,7 +78,7 @@ var _ = Describe("Upload data source", func() {
 		ud = NewUploadDataSource(file, dvKubevirt)
 		result, err := ud.Info()
 		Expect(err).NotTo(HaveOccurred())
-		Expect(ProcessingPhaseTransferDataFile).To(Equal(result))
+		Expect(ProcessingPhaseTransferScratch).To(Equal(result))
 	})
 
 	DescribeTable("calling transfer should", func(fileName string, dvContentType cdiv1.DataVolumeContentType, expectedPhase ProcessingPhase, scratchPath string, want []byte, wantErr bool) {
@@ -136,7 +136,7 @@ var _ = Describe("Upload data source", func() {
 		ud = NewUploadDataSource(sourceFile, dvKubevirt)
 		result, err := ud.Info()
 		Expect(err).NotTo(HaveOccurred())
-		Expect(ProcessingPhaseTransferDataFile).To(Equal(result))
+		Expect(ProcessingPhaseTransferScratch).To(Equal(result))
 		result, err = ud.TransferFile(filepath.Join(tmpDir, "file"))
 		Expect(err).ToNot(HaveOccurred())
 		Expect(ProcessingPhaseResize).To(Equal(result))
@@ -149,7 +149,7 @@ var _ = Describe("Upload data source", func() {
 		ud = NewUploadDataSource(sourceFile, dvKubevirt)
 		result, err := ud.Info()
 		Expect(err).NotTo(HaveOccurred())
-		Expect(ProcessingPhaseTransferDataFile).To(Equal(result))
+		Expect(ProcessingPhaseTransferScratch).To(Equal(result))
 		result, err = ud.TransferFile("/invalidpath/invalidfile")
 		Expect(err).To(HaveOccurred())
 		Expect(ProcessingPhaseError).To(Equal(result))
@@ -211,7 +211,7 @@ var _ = Describe("Async Upload data source", func() {
 		aud = NewAsyncUploadDataSource(file)
 		result, err := aud.Info()
 		Expect(err).NotTo(HaveOccurred())
-		Expect(ProcessingPhaseTransferDataFile).To(Equal(result))
+		Expect(ProcessingPhaseTransferScratch).To(Equal(result))
 	})
 
 	DescribeTable("calling transfer should", func(fileName, scratchPath string, want []byte, wantErr bool) {
@@ -260,7 +260,7 @@ var _ = Describe("Async Upload data source", func() {
 		aud = NewAsyncUploadDataSource(sourceFile)
 		result, err := aud.Info()
 		Expect(err).NotTo(HaveOccurred())
-		Expect(ProcessingPhaseTransferDataFile).To(Equal(result))
+		Expect(ProcessingPhaseTransferScratch).To(Equal(result))
 		result, err = aud.TransferFile(filepath.Join(tmpDir, "file"))
 		Expect(err).ToNot(HaveOccurred())
 		Expect(ProcessingPhaseValidatePause).To(Equal(result))
@@ -274,7 +274,7 @@ var _ = Describe("Async Upload data source", func() {
 		aud = NewAsyncUploadDataSource(sourceFile)
 		result, err := aud.Info()
 		Expect(err).NotTo(HaveOccurred())
-		Expect(ProcessingPhaseTransferDataFile).To(Equal(result))
+		Expect(ProcessingPhaseTransferScratch).To(Equal(result))
 		result, err = aud.TransferFile("/invalidpath/invalidfile")
 		Expect(err).To(HaveOccurred())
 		Expect(ProcessingPhaseError).To(Equal(result))
diff --git a/tests/upload_test.go b/tests/upload_test.go
index 73e7810c31..fa62e9b4a8 100644
--- a/tests/upload_test.go
+++ b/tests/upload_test.go
@@ -410,8 +410,6 @@ var _ = Describe("[rfe_id:138][crit:high][vendor:cnv-qe@redhat.com][level:compon
 				same, err := f.VerifyTargetPVCContentMD5(f.Namespace, archivePVC, pathInPvc, expectedMd5)
 				Expect(err).ToNot(HaveOccurred())
 				Expect(same).To(BeTrue())
-				By("Verifying the image is sparse")
-				Expect(f.VerifySparse(f.Namespace, archivePVC, pathInPvc, utils.UploadFileSize)).To(BeTrue())
 			}
 		} else {
 			checkFailureNoValidToken(archivePVC)
@@ -729,8 +727,6 @@ var _ = Describe("[rfe_id:138][crit:high][vendor:cnv-qe@redhat.com][level:compon
 				same, err := f.VerifyTargetPVCContentMD5(f.Namespace, pvc, pathInPvc, expectedMd5)
 				Expect(err).ToNot(HaveOccurred())
 				Expect(same).To(BeTrue())
-				By("Verifying the image is sparse")
-				Expect(f.VerifySparse(f.Namespace, pvc, pathInPvc, utils.UploadFileSize)).To(BeTrue())
 			}
 		} else {
 			checkFailureNoValidToken(pvcPrime)
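
Note for reviewers: below is a minimal, self-contained Go sketch of the phase selection that pkg/importer/upload-datasource.go ends up with after this change. It is illustrative only; processingPhase, nextPhase, and the phase constants are hypothetical names, and the plain strings stand in for CDI's real content-type constants (e.g. "blockdevice-clone" for common.BlockdeviceClone, "archive" for cdiv1.DataVolumeArchive).

// phase_sketch.go: illustrative sketch, not CDI's actual types or API.
package main

import "fmt"

type processingPhase string

const (
	phaseTransferDataDir  processingPhase = "TransferDataDir"  // archive: extract into the target directory
	phaseTransferDataFile processingPhase = "TransferDataFile" // write the upload stream directly to the target
	phaseTransferScratch  processingPhase = "TransferScratch"  // stage on scratch space, convert afterwards
)

// nextPhase mirrors the decision this patch makes in UploadDataSource.Info():
// archives still go to a directory, host-assisted clone from a block device
// still writes directly, and every other upload is staged on scratch space.
func nextPhase(contentType string) processingPhase {
	switch contentType {
	case "archive":
		return phaseTransferDataDir
	case "blockdevice-clone":
		// The only remaining direct-write case.
		return phaseTransferDataFile
	default:
		// Non-archive uploads (raw/qcow2 etc.) now go through scratch space
		// so the later conversion step can keep the image sparse.
		return phaseTransferScratch
	}
}

func main() {
	for _, ct := range []string{"archive", "blockdevice-clone", "kubevirt"} {
		fmt.Printf("%-17s -> %s\n", ct, nextPhase(ct))
	}
}

Routing the default case through scratch space is what restores sparseness: the target is then written by the subsequent conversion step (the unit tests above expect ProcessingPhaseResize after the scratch transfer), which is the same approach 3219 applied to clone sources.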